From 0c13ef43c7c17dfb4f9b9a0f9c9f6caa17cf6af3 Mon Sep 17 00:00:00 2001 From: Toni Harzendorf Date: Sat, 3 Jun 2023 19:42:11 +0200 Subject: [PATCH 01/28] Implement slurmdb Association API --- pyslurm/db/__init__.py | 5 +- pyslurm/db/assoc.pxd | 35 +----- pyslurm/db/assoc.pyx | 279 ++++++++++++++--------------------------- pyslurm/db/util.pyx | 4 +- 4 files changed, 99 insertions(+), 224 deletions(-) diff --git a/pyslurm/db/__init__.py b/pyslurm/db/__init__.py index 0e78a734..cfac2d31 100644 --- a/pyslurm/db/__init__.py +++ b/pyslurm/db/__init__.py @@ -25,7 +25,6 @@ from .job import ( Job, Jobs, - JobFilter, JobSearchFilter, ) from .tres import ( @@ -35,11 +34,9 @@ from .qos import ( QualitiesOfService, QualityOfService, - QualityOfServiceFilter, + QualityOfServiceSearchFilter, ) from .assoc import ( Associations, Association, - AssociationFilter, ) -from . import cluster diff --git a/pyslurm/db/assoc.pxd b/pyslurm/db/assoc.pxd index 12a0cde1..6189e447 100644 --- a/pyslurm/db/assoc.pxd +++ b/pyslurm/db/assoc.pxd @@ -29,8 +29,6 @@ from pyslurm.slurm cimport ( slurmdb_associations_get, slurmdb_destroy_assoc_rec, slurmdb_destroy_assoc_cond, - slurmdb_init_assoc_rec, - slurmdb_associations_modify, try_xmalloc, ) from pyslurm.db.util cimport ( @@ -40,47 +38,24 @@ from pyslurm.db.util cimport ( slurm_list_to_pylist, qos_list_to_pylist, ) -from pyslurm.db.tres cimport ( - _set_tres_limits, - TrackableResources, - TrackableResourceLimits, -) from pyslurm.db.connection cimport Connection from pyslurm.utils cimport cstr from pyslurm.utils.uint cimport * -from pyslurm.db.qos cimport QualitiesOfService, _set_qos_list - -cdef _parse_assoc_ptr(Association ass) -cdef _create_assoc_ptr(Association ass, conn=*) +from pyslurm.db.qos cimport QualitiesOfService -cdef class Associations(list): - pass +cdef class Associations(dict): + cdef SlurmList info -cdef class AssociationFilter: +cdef class AssociationSearchFilter: cdef slurmdb_assoc_cond_t *ptr - cdef public: - users - ids - cdef 
class Association: cdef: slurmdb_assoc_rec_t *ptr - dict qos_data - dict tres_data - - cdef public: - group_tres - group_tres_mins - group_tres_run_mins - max_tres_mins_per_job - max_tres_run_mins_per_user - max_tres_per_job - max_tres_per_node - qos + QualitiesOfService qos_data @staticmethod cdef Association from_ptr(slurmdb_assoc_rec_t *in_ptr) diff --git a/pyslurm/db/assoc.pyx b/pyslurm/db/assoc.pyx index d1ac4789..1c197751 100644 --- a/pyslurm/db/assoc.pyx +++ b/pyslurm/db/assoc.pyx @@ -23,148 +23,51 @@ # cython: language_level=3 from pyslurm.core.error import RPCError -from pyslurm.utils.helpers import ( - instance_to_dict, - collection_to_dict, - group_collection_by_cluster, - user_to_uid, -) +from pyslurm.utils.helpers import instance_to_dict from pyslurm.utils.uint import * -from pyslurm.db.connection import _open_conn_or_error -from pyslurm.db.cluster import LOCAL_CLUSTER -cdef class Associations(list): +cdef class Associations(dict): def __init__(self): pass - def as_dict(self, recursive=False, group_by_cluster=False): - """Convert the collection data to a dict. - - Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. If - this is set to `True`, then additionally all objects are - converted to dicts. - group_by_cluster (bool, optional): - By default, only the Jobs from your local Cluster are - returned. If this is set to `True`, then all the Jobs in the - collection will be grouped by the Cluster - with the name of - the cluster as the key and the value being the collection as - another dict. - - Returns: - (dict): Collection as a dict. 
- """ - col = collection_to_dict(self, identifier=Association.id, - recursive=recursive) - if not group_by_cluster: - return col.get(LOCAL_CLUSTER, {}) - - return col - - def group_by_cluster(self): - return group_collection_by_cluster(self) - @staticmethod - def load(AssociationFilter db_filter=None, Connection db_connection=None): + def load(AssociationSearchFilter search_filter=None, + Connection db_connection=None): cdef: - Associations out = Associations() + Associations assoc_dict = Associations() Association assoc - AssociationFilter cond = db_filter - SlurmList assoc_data + AssociationSearchFilter cond = search_filter SlurmListItem assoc_ptr - Connection conn - dict qos_data - dict tres_data + Connection conn = db_connection + QualitiesOfService qos_data - # Prepare SQL Filter - if not db_filter: - cond = AssociationFilter() + if not search_filter: + cond = AssociationSearchFilter() cond._create() - # Setup DB Conn - conn = _open_conn_or_error(db_connection) + if not conn: + conn = Connection.open() - # Fetch Assoc Data - assoc_data = SlurmList.wrap(slurmdb_associations_get( - conn.ptr, cond.ptr)) + assoc_dict.info = SlurmList.wrap( + slurmdb_associations_get(conn.ptr, cond.ptr)) - if assoc_data.is_null: + if assoc_dict.info.is_null: raise RPCError(msg="Failed to get Association data from slurmdbd") - # Fetch other necessary dependencies needed for translating some - # attributes (i.e QoS IDs to its name) - qos_data = QualitiesOfService.load(db_connection=conn).as_dict( - name_is_key=False) - tres_data = TrackableResources.load(db_connection=conn).as_dict( - name_is_key=False) + qos_data = QualitiesOfService.load(name_is_key=False, + db_connection=conn) - # Setup Association objects - for assoc_ptr in SlurmList.iter_and_pop(assoc_data): + for assoc_ptr in SlurmList.iter_and_pop(assoc_dict.info): assoc = Association.from_ptr(assoc_ptr.data) assoc.qos_data = qos_data - assoc.tres_data = tres_data - _parse_assoc_ptr(assoc) - out.append(assoc) + 
assoc_dict[assoc.id] = assoc - return out + return assoc_dict - @staticmethod - def modify(db_filter, Association changes, Connection db_connection=None): - cdef: - AssociationFilter afilter - Connection conn - SlurmList response - SlurmListItem response_ptr - list out = [] - - # Prepare SQL Filter - if isinstance(db_filter, Associations): - assoc_ids = [ass.id for ass in db_filter] - afilter = AssociationFilter(ids=assoc_ids) - else: - afilter = db_filter - afilter._create() - - # Setup DB conn - conn = _open_conn_or_error(db_connection) - - # Any data that isn't parsed yet or needs validation is done in this - # function. - _create_assoc_ptr(changes, conn) - - # Modify associations, get the result - # This returns a List of char* with the associations that were - # modified - response = SlurmList.wrap(slurmdb_associations_modify( - conn.ptr, afilter.ptr, changes.ptr)) - - if not response.is_null and response.cnt: - for response_ptr in response: - response_str = cstr.to_unicode(response_ptr.data) - if not response_str: - continue - - # TODO: Better format - out.append(response_str) - - elif not response.is_null: - # There was no real error, but simply nothing has been modified - raise RPCError(msg="Nothing was modified") - else: - # Autodetects the last slurm error - raise RPCError() - - if not db_connection: - # Autocommit if no connection was explicitly specified. 
- conn.commit() - return out - - -cdef class AssociationFilter: +cdef class AssociationSearchFilter: def __cinit__(self): self.ptr = NULL @@ -186,29 +89,18 @@ cdef class AssociationFilter: if not self.ptr: raise MemoryError("xmalloc failed for slurmdb_assoc_cond_t") - def _parse_users(self): - if not self.users: - return None - return list({user_to_uid(user) for user in self.users}) - def _create(self): self._alloc() cdef slurmdb_assoc_cond_t *ptr = self.ptr - make_char_list(&ptr.user_list, self.users) - cdef class Association: def __cinit__(self): self.ptr = NULL - def __init__(self, **kwargs): + def __init__(self): self._alloc_impl() - self.id = 0 - self.cluster = LOCAL_CLUSTER - for k, v in kwargs.items(): - setattr(self, k, v) def __dealloc__(self): self._dealloc_impl() @@ -224,8 +116,6 @@ cdef class Association: if not self.ptr: raise MemoryError("xmalloc failed for slurmdb_assoc_rec_t") - slurmdb_init_assoc_rec(self.ptr, 0) - @staticmethod cdef Association from_ptr(slurmdb_assoc_rec_t *in_ptr): cdef Association wrap = Association.__new__(Association) @@ -240,10 +130,9 @@ cdef class Association: """ return instance_to_dict(self) - def __eq__(self, other): - if isinstance(other, Association): - return self.id == other.id and self.cluster == other.cluster - return NotImplemented + @staticmethod + def load(name): + pass @property def account(self): @@ -297,6 +186,30 @@ cdef class Association: def group_submit_jobs(self, val): self.ptr.grp_submit_jobs = u32(val, zero_is_noval=False) + @property + def group_tres(self): + return cstr.to_dict(self.ptr.grp_tres) + + @group_tres.setter + def group_tres(self, val): + cstr.from_dict(&self.ptr.grp_tres, val) + + @property + def group_tres_mins(self): + return cstr.to_dict(self.ptr.grp_tres_mins) + + @group_tres_mins.setter + def group_tres_mins(self, val): + cstr.from_dict(&self.ptr.grp_tres_mins, val) + + @property + def group_tres_run_mins(self): + return cstr.to_dict(self.ptr.grp_tres_run_mins) + + 
@group_tres_run_mins.setter + def group_tres_run_mins(self, val): + cstr.from_dict(&self.ptr.grp_tres_run_mins, val) + @property def group_wall_time(self): return u32_parse(self.ptr.grp_wall, zero_is_noval=False) @@ -307,7 +220,7 @@ cdef class Association: @property def id(self): - return u32_parse(self.ptr.id) + return self.ptr.id @id.setter def id(self, val): @@ -319,7 +232,7 @@ cdef class Association: @property def lft(self): - return u32_parse(self.ptr.lft) + return self.ptr.lft @property def max_jobs(self): @@ -345,6 +258,38 @@ cdef class Association: def max_submit_jobs(self, val): self.ptr.max_submit_jobs = u32(val, zero_is_noval=False) + @property + def max_tres_mins_per_job(self): + return cstr.to_dict(self.ptr.max_tres_mins_pj) + + @max_tres_mins_per_job.setter + def max_tres_mins_per_job(self, val): + cstr.from_dict(&self.ptr.max_tres_mins_pj, val) + + @property + def max_tres_run_mins_per_user(self): + return cstr.to_dict(self.ptr.max_tres_run_mins) + + @max_tres_run_mins_per_user.setter + def max_tres_run_mins_per_user(self, val): + cstr.from_dict(&self.ptr.max_tres_run_mins, val) + + @property + def max_tres_per_job(self): + return cstr.to_dict(self.ptr.max_tres_pj) + + @max_tres_per_job.setter + def max_tres_per_job(self, val): + cstr.from_dict(&self.ptr.max_tres_pj, val) + + @property + def max_tres_per_node(self): + return cstr.to_dict(self.ptr.max_tres_pn) + + @max_tres_per_node.setter + def max_tres_per_node(self, val): + cstr.from_dict(&self.ptr.max_tres_pn, val) + @property def max_wall_time_per_job(self): return u32_parse(self.ptr.max_wall_pj, zero_is_noval=False) @@ -385,9 +330,17 @@ cdef class Association: def priority(self, val): self.ptr.priority = u32(val) + @property + def qos(self): + return qos_list_to_pylist(self.ptr.qos_list, self.qos_data) + + @qos.setter + def qos(self, val): + make_char_list(&self.ptr.qos_list, val) + @property def rgt(self): - return u32_parse(self.ptr.rgt) + return self.ptr.rgt @property def shares(self): @@ 
-405,51 +358,3 @@ cdef class Association: def user(self, val): cstr.fmalloc(&self.ptr.user, val) - -cdef _parse_assoc_ptr(Association ass): - cdef: - dict tres = ass.tres_data - dict qos = ass.qos_data - - ass.group_tres = TrackableResourceLimits.from_ids( - ass.ptr.grp_tres, tres) - ass.group_tres_mins = TrackableResourceLimits.from_ids( - ass.ptr.grp_tres_mins, tres) - ass.group_tres_run_mins = TrackableResourceLimits.from_ids( - ass.ptr.grp_tres_mins, tres) - ass.max_tres_mins_per_job = TrackableResourceLimits.from_ids( - ass.ptr.max_tres_mins_pj, tres) - ass.max_tres_run_mins_per_user = TrackableResourceLimits.from_ids( - ass.ptr.max_tres_run_mins, tres) - ass.max_tres_per_job = TrackableResourceLimits.from_ids( - ass.ptr.max_tres_pj, tres) - ass.max_tres_per_node = TrackableResourceLimits.from_ids( - ass.ptr.max_tres_pn, tres) - ass.qos = qos_list_to_pylist(ass.ptr.qos_list, qos) - - -cdef _create_assoc_ptr(Association ass, conn=None): - # _set_tres_limits will also check if specified TRES are valid and - # translate them to its ID which is why we need to load the current TRES - # available in the system. - ass.tres_data = TrackableResources.load(db_connection=conn) - _set_tres_limits(&ass.ptr.grp_tres, ass.group_tres, ass.tres_data) - _set_tres_limits(&ass.ptr.grp_tres_mins, ass.group_tres_mins, - ass.tres_data) - _set_tres_limits(&ass.ptr.grp_tres_run_mins, ass.group_tres_run_mins, - ass.tres_data) - _set_tres_limits(&ass.ptr.max_tres_mins_pj, ass.max_tres_mins_per_job, - ass.tres_data) - _set_tres_limits(&ass.ptr.max_tres_run_mins, ass.max_tres_run_mins_per_user, - ass.tres_data) - _set_tres_limits(&ass.ptr.max_tres_pj, ass.max_tres_per_job, - ass.tres_data) - _set_tres_limits(&ass.ptr.max_tres_pn, ass.max_tres_per_node, - ass.tres_data) - - # _set_qos_list will also check if specified QoS are valid and translate - # them to its ID, which is why we need to load the current QOS available - # in the system. 
- ass.qos_data = QualitiesOfService.load(db_connection=conn) - _set_qos_list(&ass.ptr.qos_list, self.qos, ass.qos_data) - diff --git a/pyslurm/db/util.pyx b/pyslurm/db/util.pyx index 672886c2..78598637 100644 --- a/pyslurm/db/util.pyx +++ b/pyslurm/db/util.pyx @@ -44,10 +44,8 @@ cdef slurm_list_to_pylist(List in_list): cdef qos_list_to_pylist(List in_list, qos_data): - if not in_list: - return [] - cdef list qos_nums = SlurmList.wrap(in_list, owned=False).to_pylist() + return [qos.name for qos_id, qos in qos_data.items() if qos_id in qos_nums] From 06a2379b854de449ef9d25f87f5d63cc640c4692 Mon Sep 17 00:00:00 2001 From: tazend Date: Mon, 5 Jun 2023 20:36:45 +0200 Subject: [PATCH 02/28] wip --- pyslurm/db/assoc.pxd | 5 + pyslurm/db/assoc.pyx | 10 ++ pyslurm/db/tres.pxd | 3 + pyslurm/db/tres.pyx | 252 +++------------------------------------- pyslurm/db/util.pyx | 4 +- pyslurm/slurm/extra.pxi | 4 - 6 files changed, 36 insertions(+), 242 deletions(-) diff --git a/pyslurm/db/assoc.pxd b/pyslurm/db/assoc.pxd index 6189e447..6e0caac0 100644 --- a/pyslurm/db/assoc.pxd +++ b/pyslurm/db/assoc.pxd @@ -29,6 +29,7 @@ from pyslurm.slurm cimport ( slurmdb_associations_get, slurmdb_destroy_assoc_rec, slurmdb_destroy_assoc_cond, + slurmdb_init_assoc_rec, try_xmalloc, ) from pyslurm.db.util cimport ( @@ -38,6 +39,10 @@ from pyslurm.db.util cimport ( slurm_list_to_pylist, qos_list_to_pylist, ) +from pyslurm.db.tres cimport ( + find_tres_limit, + merge_tres_str, +) from pyslurm.db.connection cimport Connection from pyslurm.utils cimport cstr from pyslurm.utils.uint cimport * diff --git a/pyslurm/db/assoc.pyx b/pyslurm/db/assoc.pyx index 1c197751..8ce332f8 100644 --- a/pyslurm/db/assoc.pyx +++ b/pyslurm/db/assoc.pyx @@ -116,6 +116,8 @@ cdef class Association: if not self.ptr: raise MemoryError("xmalloc failed for slurmdb_assoc_rec_t") + slurmdb_init_assoc_rec(self.ptr, 0) + @staticmethod cdef Association from_ptr(slurmdb_assoc_rec_t *in_ptr): cdef Association wrap = 
Association.__new__(Association) @@ -194,6 +196,14 @@ cdef class Association: def group_tres(self, val): cstr.from_dict(&self.ptr.grp_tres, val) + @property + def group_cpus(self): + return find_tres_limit(self.ptr.grp_tres, slurm.TRES_CPU) + + @group_cpus.setter + def group_cpus(self, val): + merge_tres_str(&self.ptr.grp_tres, slurm.TRES_CPU, val) + @property def group_tres_mins(self): return cstr.to_dict(self.ptr.grp_tres_mins) diff --git a/pyslurm/db/tres.pxd b/pyslurm/db/tres.pxd index 41ed1b4d..a416c75f 100644 --- a/pyslurm/db/tres.pxd +++ b/pyslurm/db/tres.pxd @@ -46,6 +46,9 @@ cdef _tres_ids_to_names(char *tres_str, dict tres_data) cdef _set_tres_limits(char **dest, TrackableResourceLimits src, TrackableResources tres_data) +cdef find_tres_limit(char *tres_str, typ) +cdef merge_tres_str(char **tres_str, typ, val) + cdef class TrackableResourceLimits: diff --git a/pyslurm/db/tres.pyx b/pyslurm/db/tres.pyx index df93dda0..6a5535b9 100644 --- a/pyslurm/db/tres.pyx +++ b/pyslurm/db/tres.pyx @@ -24,174 +24,13 @@ from pyslurm.utils.uint import * from pyslurm.constants import UNLIMITED -from pyslurm.core.error import RPCError -from pyslurm.utils.helpers import instance_to_dict, collection_to_dict_global -from pyslurm.utils import cstr -from pyslurm.db.connection import _open_conn_or_error -import json -TRES_TYPE_DELIM = "/" - - -cdef class TrackableResourceLimits: - - def __init__(self, **kwargs): - self.fs = {} - self.gres = {} - self.license = {} - - for k, v in kwargs.items(): - if TRES_TYPE_DELIM in k: - typ, name = self._unflatten_tres(k) - cur_val = getattr(self, typ) - - if not isinstance(cur_val, dict): - raise ValueError(f"TRES Type {typ} cannot have a name " - f"({name}). 
Invalid Value: {typ}/{name}") - - cur_val.update({name : int(v)}) - setattr(self, typ, cur_val) - else: - setattr(self, k, v) - - @staticmethod - cdef from_ids(char *tres_id_str, dict tres_data): - tres_list = _tres_ids_to_names(tres_id_str, tres_data) - if not tres_list: - return None - - cdef TrackableResourceLimits out = TrackableResourceLimits() - - for tres in tres_list: - typ, name, cnt = tres - cur_val = getattr(out, typ, slurm.NO_VAL64) - if cur_val != slurm.NO_VAL64: - if isinstance(cur_val, dict): - cur_val.update({name : cnt}) - setattr(out, typ, cur_val) - else: - setattr(out, typ, cnt) - - return out - - def _validate(self, TrackableResources tres_data): - id_dict = _tres_names_to_ids(self.as_dict(flatten_limits=True), - tres_data) - return id_dict - - def _unflatten_tres(self, type_and_name): - typ, name = type_and_name.split(TRES_TYPE_DELIM, 1) - return typ, name - - def _flatten_tres(self, typ, vals): - cdef dict out = {} - for name, cnt in vals.items(): - out[f"{typ}{TRES_TYPE_DELIM}{name}"] = cnt - - return out - - def as_dict(self, flatten_limits=False): - cdef dict inst_dict = instance_to_dict(self) - - if flatten_limits: - vals = inst_dict.pop("fs") - inst_dict.update(self._flatten_tres("fs", vals)) - - vals = inst_dict.pop("license") - inst_dict.update(self._flatten_tres("license", vals)) - - vals = inst_dict.pop("gres") - inst_dict.update(self._flatten_tres("gres", vals)) - - return inst_dict - - -cdef class TrackableResourceFilter: - - def __cinit__(self): - self.ptr = NULL - - def __init__(self, **kwargs): - for k, v in kwargs.items(): - setattr(self, k, v) - - def __dealloc__(self): - self._dealloc() - - def _dealloc(self): - slurmdb_destroy_tres_cond(self.ptr) - self.ptr = NULL - - def _alloc(self): - self._dealloc() - self.ptr = try_xmalloc(sizeof(slurmdb_tres_cond_t)) - if not self.ptr: - raise MemoryError("xmalloc failed for slurmdb_tres_cond_t") - slurmdb_init_tres_cond(self.ptr, 0) - - def _create(self): - self._alloc() - - -cdef 
class TrackableResources(list): +cdef class TrackableResources(dict): def __init__(self): pass - def as_dict(self, recursive=False, name_is_key=True): - """Convert the collection data to a dict. - - Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. If - this is set to `True`, then additionally all objects are - converted to dicts. - name_is_key (bool, optional): - By default, the keys in this dict are the names of each TRES. - If this is set to `False`, then the unique ID of the TRES will - be used as dict keys. - - Returns: - (dict): Collection as a dict. - """ - identifier = TrackableResource.type_and_name - if not name_is_key: - identifier = TrackableResource.id - - return collection_to_dict_global(self, identifier=identifier, - recursive=recursive) - - @staticmethod - def load(Connection db_connection=None): - cdef: - TrackableResources out = TrackableResources() - TrackableResource tres - Connection conn - SlurmList tres_data - SlurmListItem tres_ptr - TrackableResourceFilter db_filter = TrackableResourceFilter() - - # Prepare SQL Filter - db_filter._create() - - # Setup DB Conn - conn = _open_conn_or_error(db_connection) - - # Fetch TRES data - tres_data = SlurmList.wrap(slurmdb_tres_get(conn.ptr, db_filter.ptr)) - - if tres_data.is_null: - raise RPCError(msg="Failed to get TRES data from slurmdbd") - - # Setup TRES objects - for tres_ptr in SlurmList.iter_and_pop(tres_data): - tres = TrackableResource.from_ptr( - tres_ptr.data) - out.append(tres) - - return out - @staticmethod cdef TrackableResources from_str(char *tres_str): cdef: @@ -214,7 +53,17 @@ cdef class TrackableResources(list): @staticmethod cdef find_count_in_str(char *tres_str, typ, on_noval=0, on_inf=0): - return find_tres_count(tres_str, typ, on_noval, on_inf) + if not tres_str: + return on_noval + + cdef uint64_t tmp + tmp = slurmdb_find_tres_count_in_string(tres_str, typ) + if tmp == slurm.INFINITE64: + return on_inf + elif tmp == slurm.NO_VAL64: + 
return on_noval + else: + return tmp cdef class TrackableResource: @@ -246,9 +95,6 @@ cdef class TrackableResource: wrap.ptr = in_ptr return wrap - def as_dict(self): - return instance_to_dict(self) - @property def id(self): return self.ptr.id @@ -261,14 +107,6 @@ cdef class TrackableResource: def type(self): return cstr.to_unicode(self.ptr.type) - @property - def type_and_name(self): - type_and_name = self.type - if self.name: - type_and_name = f"{type_and_name}{TRES_TYPE_DELIM}{self.name}" - - return type_and_name - @property def count(self): return u64_parse(self.ptr.count) @@ -277,26 +115,13 @@ cdef class TrackableResource: # alloc_secs -cdef find_tres_count(char *tres_str, typ, on_noval=0, on_inf=0): - if not tres_str: - return on_noval - - cdef uint64_t tmp - tmp = slurmdb_find_tres_count_in_string(tres_str, typ) - if tmp == slurm.INFINITE64: - return on_inf - elif tmp == slurm.NO_VAL64: - return on_noval - else: - return tmp - - cdef find_tres_limit(char *tres_str, typ): - return find_tres_count(tres_str, typ, on_noval=None, on_inf=UNLIMITED) + return TrackableResources.find_count_in_str(tres_str, typ, on_noval=None, + on_inf=UNLIMITED) cdef merge_tres_str(char **tres_str, typ, val): - cdef uint64_t _val = u64(dehumanize(val)) + cdef uint64_t _val = u64(val) current = cstr.to_dict(tres_str[0]) if _val == slurm.NO_VAL64: @@ -305,50 +130,3 @@ cdef merge_tres_str(char **tres_str, typ, val): current.update({typ : _val}) cstr.from_dict(tres_str, current) - - -cdef _tres_ids_to_names(char *tres_str, dict tres_data): - if not tres_str: - return None - - cdef: - dict tdict = cstr.to_dict(tres_str) - list out = [] - - if not tres_data: - return None - - for tid, cnt in tdict.items(): - if isinstance(tid, str) and tid.isdigit(): - _tid = int(tid) - if _tid in tres_data: - out.append( - (tres_data[_tid].type, tres_data[_tid].name, int(cnt)) - ) - - return out - - -def _tres_names_to_ids(dict tres_dict, TrackableResources tres_data): - cdef dict out = {} - if not 
tres_dict: - return out - - for tid, cnt in tres_dict.items(): - real_id = _validate_tres_single(tid, tres_data) - out[real_id] = cnt - - return out - - -def _validate_tres_single(tid, TrackableResources tres_data): - for tres in tres_data: - if tid == tres.id or tid == tres.type_and_name: - return tres.id - - raise ValueError(f"Invalid TRES specified: {tid}") - - -cdef _set_tres_limits(char **dest, TrackableResourceLimits src, - TrackableResources tres_data): - cstr.from_dict(dest, src._validate(tres_data)) diff --git a/pyslurm/db/util.pyx b/pyslurm/db/util.pyx index 78598637..672886c2 100644 --- a/pyslurm/db/util.pyx +++ b/pyslurm/db/util.pyx @@ -44,8 +44,10 @@ cdef slurm_list_to_pylist(List in_list): cdef qos_list_to_pylist(List in_list, qos_data): - cdef list qos_nums = SlurmList.wrap(in_list, owned=False).to_pylist() + if not in_list: + return [] + cdef list qos_nums = SlurmList.wrap(in_list, owned=False).to_pylist() return [qos.name for qos_id, qos in qos_data.items() if qos_id in qos_nums] diff --git a/pyslurm/slurm/extra.pxi b/pyslurm/slurm/extra.pxi index 3557b0b9..8b6ce5eb 100644 --- a/pyslurm/slurm/extra.pxi +++ b/pyslurm/slurm/extra.pxi @@ -165,9 +165,6 @@ ctypedef enum tres_types_t: # Global Environment cdef extern char **environ -# Local slurm config -cdef extern slurm_conf_t slurm_conf - # # Slurm Memory routines # We simply use the macros from xmalloc.h - more convenient @@ -276,7 +273,6 @@ cdef extern void slurmdb_job_cond_def_start_end(slurmdb_job_cond_t *job_cond) cdef extern uint64_t slurmdb_find_tres_count_in_string(char *tres_str_in, int id) cdef extern slurmdb_job_rec_t *slurmdb_create_job_rec() cdef extern void slurmdb_init_assoc_rec(slurmdb_assoc_rec_t *assoc, bool free_it) -cdef extern void slurmdb_init_tres_cond(slurmdb_tres_cond_t *tres, bool free_it) # # Slurm Partition functions From 0280d4678cb3d4a1a63c80e5a782d588e8eae176 Mon Sep 17 00:00:00 2001 From: tazend Date: Fri, 9 Jun 2023 23:10:22 +0200 Subject: [PATCH 03/28] wip --- 
pyslurm/db/__init__.py | 1 + pyslurm/db/assoc.pxd | 10 ++- pyslurm/db/assoc.pyx | 118 ++++++++++++++++++++++++++------ pyslurm/db/tres.pxd | 27 +------- pyslurm/db/tres.pyx | 144 ++++++++++++++++++++++++++++++++++++---- pyslurm/slurm/extra.pxi | 1 + 6 files changed, 241 insertions(+), 60 deletions(-) diff --git a/pyslurm/db/__init__.py b/pyslurm/db/__init__.py index cfac2d31..377fec6e 100644 --- a/pyslurm/db/__init__.py +++ b/pyslurm/db/__init__.py @@ -39,4 +39,5 @@ from .assoc import ( Associations, Association, + AssociationFilter, ) diff --git a/pyslurm/db/assoc.pxd b/pyslurm/db/assoc.pxd index 6e0caac0..2a0e6cb0 100644 --- a/pyslurm/db/assoc.pxd +++ b/pyslurm/db/assoc.pxd @@ -30,6 +30,7 @@ from pyslurm.slurm cimport ( slurmdb_destroy_assoc_rec, slurmdb_destroy_assoc_cond, slurmdb_init_assoc_rec, + slurmdb_associations_modify, try_xmalloc, ) from pyslurm.db.util cimport ( @@ -42,6 +43,8 @@ from pyslurm.db.util cimport ( from pyslurm.db.tres cimport ( find_tres_limit, merge_tres_str, + tres_ids_to_names, + TrackableResources, ) from pyslurm.db.connection cimport Connection from pyslurm.utils cimport cstr @@ -53,14 +56,19 @@ cdef class Associations(dict): cdef SlurmList info -cdef class AssociationSearchFilter: +cdef class AssociationFilter: cdef slurmdb_assoc_cond_t *ptr + cdef public: + users + ids + cdef class Association: cdef: slurmdb_assoc_rec_t *ptr QualitiesOfService qos_data + TrackableResources tres_data @staticmethod cdef Association from_ptr(slurmdb_assoc_rec_t *in_ptr) diff --git a/pyslurm/db/assoc.pyx b/pyslurm/db/assoc.pyx index 8ce332f8..5d8b5203 100644 --- a/pyslurm/db/assoc.pyx +++ b/pyslurm/db/assoc.pyx @@ -23,7 +23,11 @@ # cython: language_level=3 from pyslurm.core.error import RPCError -from pyslurm.utils.helpers import instance_to_dict +from pyslurm.utils.helpers import ( + instance_to_dict, + user_to_uid, +) +from pyslurm.db.tres import tres_names_to_ids from pyslurm.utils.uint import * @@ -33,23 +37,27 @@ cdef class Associations(dict): pass 
@staticmethod - def load(AssociationSearchFilter search_filter=None, + def load(AssociationFilter search_filter=None, Connection db_connection=None): cdef: Associations assoc_dict = Associations() Association assoc - AssociationSearchFilter cond = search_filter + AssociationFilter cond = search_filter SlurmListItem assoc_ptr Connection conn = db_connection QualitiesOfService qos_data + TrackableResources tres_data if not search_filter: - cond = AssociationSearchFilter() + cond = AssociationFilter() cond._create() if not conn: conn = Connection.open() + if not conn.is_open: + raise ValueError("Database connection is not open") + assoc_dict.info = SlurmList.wrap( slurmdb_associations_get(conn.ptr, cond.ptr)) @@ -58,16 +66,71 @@ cdef class Associations(dict): qos_data = QualitiesOfService.load(name_is_key=False, db_connection=conn) + tres_data = TrackableResources.load(name_is_key=False, + db_connection=conn) for assoc_ptr in SlurmList.iter_and_pop(assoc_dict.info): assoc = Association.from_ptr(assoc_ptr.data) assoc.qos_data = qos_data + assoc.tres_data = tres_data assoc_dict[assoc.id] = assoc return assoc_dict + @staticmethod + def modify(assocs, Association changes, + Connection db_connection=None): + cdef: + AssociationFilter afilter + Connection conn = db_connection + SlurmList response + SlurmListItem response_ptr + list out = [] + + if not conn: + conn = Connection.open() + + if not conn.is_open: + raise ValueError("Database connection is not open") + + if isinstance(assocs, Associations): + assoc_ids = list(assocs.keys()) + afilter = AssociationFilter(ids=assoc_ids) + else: + afilter = assocs + + # Check if TRES specified are actually valid. slurmdbd does not + # give an explicit error and just ignores invalid tres types. 
+ changes._validate_tres() + + afilter._create() + response = SlurmList.wrap( + slurmdb_associations_modify(conn.ptr, afilter.ptr, changes.ptr)) + + if not response.is_null and response.cnt: + for response_ptr in response: + response_str = cstr.to_unicode(response_ptr.data) + if not response_str: + continue + + # TODO: Better format + out.append(response_str) + + elif not response.is_null: + # There was no real error, but simply nothing has been modified + raise RPCError(msg="Nothing was modified") + else: + # Autodetects the last slurm error + raise RPCError() + + if not db_connection: + # Autocommit if no connection was explicitly specified. + conn.commit() -cdef class AssociationSearchFilter: + return out + + +cdef class AssociationFilter: def __cinit__(self): self.ptr = NULL @@ -89,10 +152,17 @@ cdef class AssociationSearchFilter: if not self.ptr: raise MemoryError("xmalloc failed for slurmdb_assoc_cond_t") + def _parse_users(self): + if not self.users: + return None + return list({user_to_uid(user) for user in self.users}) + def _create(self): self._alloc() cdef slurmdb_assoc_cond_t *ptr = self.ptr + make_char_list(&ptr.user_list, self.users) + cdef class Association: @@ -132,6 +202,22 @@ cdef class Association: """ return instance_to_dict(self) + def _validate_tres(self): + self.tres_data = TrackableResources.load(name_is_key=False) + self.group_tres = tres_names_to_ids(self.group_tres, self.tres_data) + self.group_tres_mins = tres_names_to_ids( + self.group_tres_mins, self.tres_data) + self.group_tres_run_mins = tres_names_to_ids( + self.group_tres_run_mins, self.tres_data) + self.max_tres_mins_per_job = tres_names_to_ids( + self.max_tres_mins_per_job, self.tres_data) + self.max_tres_run_mins_per_user = tres_names_to_ids( + self.max_tres_run_mins_per_user, self.tres_data) + self.max_tres_per_job = tres_names_to_ids( + self.max_tres_per_job, self.tres_data) + self.max_tres_per_node = tres_names_to_ids( + self.max_tres_per_node, self.tres_data) + @staticmethod 
def load(name): pass @@ -190,23 +276,15 @@ cdef class Association: @property def group_tres(self): - return cstr.to_dict(self.ptr.grp_tres) + return tres_ids_to_names(self.ptr.grp_tres, self.tres_data) @group_tres.setter def group_tres(self, val): cstr.from_dict(&self.ptr.grp_tres, val) - @property - def group_cpus(self): - return find_tres_limit(self.ptr.grp_tres, slurm.TRES_CPU) - - @group_cpus.setter - def group_cpus(self, val): - merge_tres_str(&self.ptr.grp_tres, slurm.TRES_CPU, val) - @property def group_tres_mins(self): - return cstr.to_dict(self.ptr.grp_tres_mins) + return tres_ids_to_names(self.ptr.grp_tres_mins, self.tres_data) @group_tres_mins.setter def group_tres_mins(self, val): @@ -214,7 +292,7 @@ cdef class Association: @property def group_tres_run_mins(self): - return cstr.to_dict(self.ptr.grp_tres_run_mins) + return tres_ids_to_names(self.ptr.grp_tres_run_mins, self.tres_data) @group_tres_run_mins.setter def group_tres_run_mins(self, val): @@ -270,7 +348,7 @@ cdef class Association: @property def max_tres_mins_per_job(self): - return cstr.to_dict(self.ptr.max_tres_mins_pj) + return tres_ids_to_names(self.ptr.max_tres_mins_pj, self.tres_data) @max_tres_mins_per_job.setter def max_tres_mins_per_job(self, val): @@ -278,7 +356,7 @@ cdef class Association: @property def max_tres_run_mins_per_user(self): - return cstr.to_dict(self.ptr.max_tres_run_mins) + return tres_ids_to_names(self.ptr.max_tres_run_mins, self.tres_data) @max_tres_run_mins_per_user.setter def max_tres_run_mins_per_user(self, val): @@ -286,7 +364,7 @@ cdef class Association: @property def max_tres_per_job(self): - return cstr.to_dict(self.ptr.max_tres_pj) + return tres_ids_to_names(self.ptr.max_tres_pj, self.tres_data) @max_tres_per_job.setter def max_tres_per_job(self, val): @@ -294,7 +372,7 @@ cdef class Association: @property def max_tres_per_node(self): - return cstr.to_dict(self.ptr.max_tres_pn) + return tres_ids_to_names(self.ptr.max_tres_pn, self.tres_data) 
@max_tres_per_node.setter def max_tres_per_node(self, val): diff --git a/pyslurm/db/tres.pxd b/pyslurm/db/tres.pxd index a416c75f..3aa2a019 100644 --- a/pyslurm/db/tres.pxd +++ b/pyslurm/db/tres.pxd @@ -42,37 +42,14 @@ from pyslurm.db.connection cimport Connection cdef find_tres_count(char *tres_str, typ, on_noval=*, on_inf=*) cdef find_tres_limit(char *tres_str, typ) cdef merge_tres_str(char **tres_str, typ, val) -cdef _tres_ids_to_names(char *tres_str, dict tres_data) -cdef _set_tres_limits(char **dest, TrackableResourceLimits src, - TrackableResources tres_data) - -cdef find_tres_limit(char *tres_str, typ) -cdef merge_tres_str(char **tres_str, typ, val) - - -cdef class TrackableResourceLimits: - - cdef public: - cpu - mem - energy - node - billing - fs - vmem - pages - gres - license - - @staticmethod - cdef from_ids(char *tres_id_str, dict tres_data) +cdef tres_ids_to_names(char *tres_str, TrackableResources tres_data) cdef class TrackableResourceFilter: cdef slurmdb_tres_cond_t *ptr -cdef class TrackableResources(list): +cdef class TrackableResources(dict): cdef public raw_str @staticmethod diff --git a/pyslurm/db/tres.pyx b/pyslurm/db/tres.pyx index 6a5535b9..550987c3 100644 --- a/pyslurm/db/tres.pyx +++ b/pyslurm/db/tres.pyx @@ -24,6 +24,34 @@ from pyslurm.utils.uint import * from pyslurm.constants import UNLIMITED +from pyslurm.core.error import RPCError + + +cdef class TrackableResourceFilter: + + def __cinit__(self): + self.ptr = NULL + + def __init__(self, **kwargs): + for k, v in kwargs.items(): + setattr(self, k, v) + + def __dealloc__(self): + self._dealloc() + + def _dealloc(self): + slurmdb_destroy_tres_cond(self.ptr) + self.ptr = NULL + + def _alloc(self): + self._dealloc() + self.ptr = try_xmalloc(sizeof(slurmdb_tres_cond_t)) + if not self.ptr: + raise MemoryError("xmalloc failed for slurmdb_tres_cond_t") + slurmdb_init_tres_cond(self.ptr, 0) + + def _create(self): + self._alloc() cdef class TrackableResources(dict): @@ -31,6 +59,39 @@ cdef class 
TrackableResources(dict): def __init__(self): pass + @staticmethod + def load(Connection db_connection=None, name_is_key=True): + cdef: + TrackableResources out = TrackableResources() + TrackableResource tres + Connection conn = db_connection + SlurmList tres_list + SlurmListItem tres_ptr + TrackableResourceFilter db_filter = TrackableResourceFilter() + + if not conn: + conn = Connection.open() + + if not conn.is_open: + raise ValueError("Database connection is not open") + + db_filter._create() + tres_list = SlurmList.wrap(slurmdb_tres_get(conn.ptr, db_filter.ptr)) + + if tres_list.is_null: + raise RPCError(msg="Failed to get TRES data from slurmdbd") + + for tres_ptr in SlurmList.iter_and_pop(tres_list): + tres = TrackableResource.from_ptr( + tres_ptr.data) + + if name_is_key and tres.type: + out[tres.type_and_name] = tres + else: + out[tres.id] = tres + + return out + @staticmethod cdef TrackableResources from_str(char *tres_str): cdef: @@ -53,17 +114,7 @@ cdef class TrackableResources(dict): @staticmethod cdef find_count_in_str(char *tres_str, typ, on_noval=0, on_inf=0): - if not tres_str: - return on_noval - - cdef uint64_t tmp - tmp = slurmdb_find_tres_count_in_string(tres_str, typ) - if tmp == slurm.INFINITE64: - return on_inf - elif tmp == slurm.NO_VAL64: - return on_noval - else: - return tmp + return find_tres_count(tres_str, typ, on_noval, on_inf) cdef class TrackableResource: @@ -107,6 +158,14 @@ cdef class TrackableResource: def type(self): return cstr.to_unicode(self.ptr.type) + @property + def type_and_name(self): + type_and_name = self.type + if self.name: + type_and_name = f"{type_and_name}/{self.name}" + + return type_and_name + @property def count(self): return u64_parse(self.ptr.count) @@ -115,13 +174,26 @@ cdef class TrackableResource: # alloc_secs +cdef find_tres_count(char *tres_str, typ, on_noval=0, on_inf=0): + if not tres_str: + return on_noval + + cdef uint64_t tmp + tmp = slurmdb_find_tres_count_in_string(tres_str, typ) + if tmp == 
slurm.INFINITE64: + return on_inf + elif tmp == slurm.NO_VAL64: + return on_noval + else: + return tmp + + cdef find_tres_limit(char *tres_str, typ): - return TrackableResources.find_count_in_str(tres_str, typ, on_noval=None, - on_inf=UNLIMITED) + return find_tres_count(tres_str, typ, on_noval=None, on_inf=UNLIMITED) cdef merge_tres_str(char **tres_str, typ, val): - cdef uint64_t _val = u64(val) + cdef uint64_t _val = u64(dehumanize(val)) current = cstr.to_dict(tres_str[0]) if _val == slurm.NO_VAL64: @@ -130,3 +202,47 @@ cdef merge_tres_str(char **tres_str, typ, val): current.update({typ : _val}) cstr.from_dict(tres_str, current) + + +cdef tres_ids_to_names(char *tres_str, TrackableResources tres_data): + if not tres_str: + return {} + + cdef: + dict tdict = cstr.to_dict(tres_str) + dict out = {} + + if not tres_data: + return tdict + + for tid, cnt in tdict.items(): + if isinstance(tid, str) and tid.isdigit(): + _tid = int(tid) + if _tid in tres_data: + out[tres_data[_tid].type_and_name] = cnt + continue + + # If we can't find the TRES ID in our data, return it raw. 
+ out[tid] = cnt + + return out + + +def tres_names_to_ids(dict tres_dict, TrackableResources tres_data): + cdef dict out = {} + if not tres_dict: + return out + + for tid, cnt in tres_dict.items(): + real_id = validate_tres_single(tid, tres_data) + out[real_id] = cnt + + return out + + +def validate_tres_single(tid, TrackableResources tres_data): + for tres in tres_data.values(): + if tid == tres.id or tid == tres.type_and_name: + return tres.id + + raise ValueError(f"Invalid TRES specified: {tid}") diff --git a/pyslurm/slurm/extra.pxi b/pyslurm/slurm/extra.pxi index 8b6ce5eb..a15afd52 100644 --- a/pyslurm/slurm/extra.pxi +++ b/pyslurm/slurm/extra.pxi @@ -273,6 +273,7 @@ cdef extern void slurmdb_job_cond_def_start_end(slurmdb_job_cond_t *job_cond) cdef extern uint64_t slurmdb_find_tres_count_in_string(char *tres_str_in, int id) cdef extern slurmdb_job_rec_t *slurmdb_create_job_rec() cdef extern void slurmdb_init_assoc_rec(slurmdb_assoc_rec_t *assoc, bool free_it) +cdef extern void slurmdb_init_tres_cond(slurmdb_tres_cond_t *tres, bool free_it) # # Slurm Partition functions From 77fbc945f101b6ef298527d5d0107aa488a7768d Mon Sep 17 00:00:00 2001 From: tazend Date: Sat, 10 Jun 2023 13:41:10 +0200 Subject: [PATCH 04/28] wip --- pyslurm/db/assoc.pxd | 10 +++++ pyslurm/db/assoc.pyx | 102 ++++++++++++++----------------------------- pyslurm/db/tres.pxd | 18 ++++++++ pyslurm/db/tres.pyx | 95 ++++++++++++++++++++++++++++++++++++---- 4 files changed, 147 insertions(+), 78 deletions(-) diff --git a/pyslurm/db/assoc.pxd b/pyslurm/db/assoc.pxd index 2a0e6cb0..235ec5d5 100644 --- a/pyslurm/db/assoc.pxd +++ b/pyslurm/db/assoc.pxd @@ -45,6 +45,7 @@ from pyslurm.db.tres cimport ( merge_tres_str, tres_ids_to_names, TrackableResources, + TrackableResourceLimits, ) from pyslurm.db.connection cimport Connection from pyslurm.utils cimport cstr @@ -70,6 +71,15 @@ cdef class Association: QualitiesOfService qos_data TrackableResources tres_data + cdef public: + group_tres + group_tres_mins 
+ group_tres_run_mins + max_tres_mins_per_job + max_tres_run_mins_per_user + max_tres_per_job + max_tres_per_node + @staticmethod cdef Association from_ptr(slurmdb_assoc_rec_t *in_ptr) diff --git a/pyslurm/db/assoc.pyx b/pyslurm/db/assoc.pyx index 5d8b5203..951af4f0 100644 --- a/pyslurm/db/assoc.pyx +++ b/pyslurm/db/assoc.pyx @@ -73,6 +73,7 @@ cdef class Associations(dict): assoc = Association.from_ptr(assoc_ptr.data) assoc.qos_data = qos_data assoc.tres_data = tres_data + assoc._parse_tres() assoc_dict[assoc.id] = assoc return assoc_dict @@ -194,6 +195,23 @@ cdef class Association: wrap.ptr = in_ptr return wrap + def _parse_tres(self): + cdef TrackableResources tres = self.tres_data + self.group_tres = TrackableResourceLimits.from_ids( + self.ptr.grp_tres, tres) + self.group_tres_mins = TrackableResourceLimits.from_ids( + self.ptr.grp_tres_mins, tres) + self.group_tres_run_mins = TrackableResourceLimits.from_ids( + self.ptr.grp_tres_mins, tres) + self.max_tres_mins_per_job = TrackableResourceLimits.from_ids( + self.ptr.max_tres_mins_pj, tres) + self.max_tres_run_mins_per_user = TrackableResourceLimits.from_ids( + self.ptr.max_tres_run_mins, tres) + self.max_tres_per_job = TrackableResourceLimits.from_ids( + self.ptr.max_tres_pj, tres) + self.max_tres_per_node = TrackableResourceLimits.from_ids( + self.ptr.max_tres_pn, tres) + def as_dict(self): """Database Association information formatted as a dictionary. 
@@ -204,19 +222,20 @@ cdef class Association: def _validate_tres(self): self.tres_data = TrackableResources.load(name_is_key=False) - self.group_tres = tres_names_to_ids(self.group_tres, self.tres_data) - self.group_tres_mins = tres_names_to_ids( - self.group_tres_mins, self.tres_data) - self.group_tres_run_mins = tres_names_to_ids( - self.group_tres_run_mins, self.tres_data) - self.max_tres_mins_per_job = tres_names_to_ids( - self.max_tres_mins_per_job, self.tres_data) - self.max_tres_run_mins_per_user = tres_names_to_ids( - self.max_tres_run_mins_per_user, self.tres_data) - self.max_tres_per_job = tres_names_to_ids( - self.max_tres_per_job, self.tres_data) - self.max_tres_per_node = tres_names_to_ids( - self.max_tres_per_node, self.tres_data) + cstr.from_dict(&self.ptr.grp_tres, + self.group_tres._validate(self.tres_data)) + cstr.from_dict(&self.ptr.grp_tres_mins, + self.group_tres_mins._validate(self.tres_data)) + cstr.from_dict(&self.ptr.grp_tres_run_mins, + self.group_tres_run_mins._validate(self.tres_data)) + cstr.from_dict(&self.ptr.max_tres_mins_pj, + self.max_tres_mins_per_job._validate(self.tres_data)) + cstr.from_dict(&self.ptr.max_tres_run_mins, + self.max_tres_run_mins_per_user._validate(self.tres_data)) + cstr.from_dict(&self.ptr.max_tres_pj, + self.max_tres_per_job._validate(self.tres_data)) + cstr.from_dict(&self.ptr.max_tres_pn, + self.max_tres_per_node._validate(self.tres_data)) @staticmethod def load(name): @@ -274,30 +293,6 @@ cdef class Association: def group_submit_jobs(self, val): self.ptr.grp_submit_jobs = u32(val, zero_is_noval=False) - @property - def group_tres(self): - return tres_ids_to_names(self.ptr.grp_tres, self.tres_data) - - @group_tres.setter - def group_tres(self, val): - cstr.from_dict(&self.ptr.grp_tres, val) - - @property - def group_tres_mins(self): - return tres_ids_to_names(self.ptr.grp_tres_mins, self.tres_data) - - @group_tres_mins.setter - def group_tres_mins(self, val): - cstr.from_dict(&self.ptr.grp_tres_mins, val) - 
- @property - def group_tres_run_mins(self): - return tres_ids_to_names(self.ptr.grp_tres_run_mins, self.tres_data) - - @group_tres_run_mins.setter - def group_tres_run_mins(self, val): - cstr.from_dict(&self.ptr.grp_tres_run_mins, val) - @property def group_wall_time(self): return u32_parse(self.ptr.grp_wall, zero_is_noval=False) @@ -346,38 +341,6 @@ cdef class Association: def max_submit_jobs(self, val): self.ptr.max_submit_jobs = u32(val, zero_is_noval=False) - @property - def max_tres_mins_per_job(self): - return tres_ids_to_names(self.ptr.max_tres_mins_pj, self.tres_data) - - @max_tres_mins_per_job.setter - def max_tres_mins_per_job(self, val): - cstr.from_dict(&self.ptr.max_tres_mins_pj, val) - - @property - def max_tres_run_mins_per_user(self): - return tres_ids_to_names(self.ptr.max_tres_run_mins, self.tres_data) - - @max_tres_run_mins_per_user.setter - def max_tres_run_mins_per_user(self, val): - cstr.from_dict(&self.ptr.max_tres_run_mins, val) - - @property - def max_tres_per_job(self): - return tres_ids_to_names(self.ptr.max_tres_pj, self.tres_data) - - @max_tres_per_job.setter - def max_tres_per_job(self, val): - cstr.from_dict(&self.ptr.max_tres_pj, val) - - @property - def max_tres_per_node(self): - return tres_ids_to_names(self.ptr.max_tres_pn, self.tres_data) - - @max_tres_per_node.setter - def max_tres_per_node(self, val): - cstr.from_dict(&self.ptr.max_tres_pn, val) - @property def max_wall_time_per_job(self): return u32_parse(self.ptr.max_wall_pj, zero_is_noval=False) @@ -424,6 +387,7 @@ cdef class Association: @qos.setter def qos(self, val): + # TODO: must be ids make_char_list(&self.ptr.qos_list, val) @property diff --git a/pyslurm/db/tres.pxd b/pyslurm/db/tres.pxd index 3aa2a019..a37fd9bf 100644 --- a/pyslurm/db/tres.pxd +++ b/pyslurm/db/tres.pxd @@ -45,6 +45,24 @@ cdef merge_tres_str(char **tres_str, typ, val) cdef tres_ids_to_names(char *tres_str, TrackableResources tres_data) +cdef class TrackableResourceLimits: + + cdef public: + cpu + mem 
+ energy + node + billing + fs + vmem + pages + gres + license + + @staticmethod + cdef from_ids(char *tres_id_str, TrackableResources tres_data) + + cdef class TrackableResourceFilter: cdef slurmdb_tres_cond_t *ptr diff --git a/pyslurm/db/tres.pyx b/pyslurm/db/tres.pyx index 550987c3..4ba94792 100644 --- a/pyslurm/db/tres.pyx +++ b/pyslurm/db/tres.pyx @@ -25,6 +25,85 @@ from pyslurm.utils.uint import * from pyslurm.constants import UNLIMITED from pyslurm.core.error import RPCError +from pyslurm.utils.helpers import instance_to_dict +from pyslurm.utils import cstr +import json + + +TRES_TYPE_DELIM = "/" + + +cdef class TrackableResourceLimits: + + def __init__(self, **kwargs): + self.fs = {} + self.gres = {} + self.license = {} + + for k, v in kwargs.items(): + if TRES_TYPE_DELIM in k: + typ, name = self._unflatten_tres(k) + cur_val = getattr(self, typ) + + if not isinstance(cur_val, dict): + raise ValueError(f"TRES Type {typ} cannot have a name " + f"({name}). Invalid Value: {typ}/{name}") + + cur_val.update({name : int(v)}) + setattr(self, typ, cur_val) + else: + setattr(self, k, v) + + @staticmethod + cdef from_ids(char *tres_id_str, TrackableResources tres_data): + tres_list = tres_ids_to_names(tres_id_str, tres_data) + if not tres_list: + return None + + cdef TrackableResourceLimits out = TrackableResourceLimits() + + for tres in tres_list: + typ, name, cnt = tres + cur_val = getattr(out, typ, slurm.NO_VAL64) + if cur_val != slurm.NO_VAL64: + if isinstance(cur_val, dict): + cur_val.update({name : cnt}) + setattr(out, typ, cur_val) + else: + setattr(out, typ, cnt) + + return out + + def _validate(self, TrackableResources tres_data): + id_dict = tres_names_to_ids(self.as_dict(flatten_limits=True), + tres_data) + return id_dict + + def _unflatten_tres(self, type_and_name): + typ, name = type_and_name.split(TRES_TYPE_DELIM, 1) + return typ, name + + def _flatten_tres(self, typ, vals): + cdef dict out = {} + for name, cnt in vals.items(): + 
out[f"{typ}{TRES_TYPE_DELIM}{name}"] = cnt + + return out + + def as_dict(self, flatten_limits=False): + cdef dict inst_dict = instance_to_dict(self) + + if flatten_limits: + vals = inst_dict.pop("fs") + inst_dict.update(self._flatten_tres("fs", vals)) + + vals = inst_dict.pop("license") + inst_dict.update(self._flatten_tres("license", vals)) + + vals = inst_dict.pop("gres") + inst_dict.update(self._flatten_tres("gres", vals)) + + return inst_dict cdef class TrackableResourceFilter: @@ -162,7 +241,7 @@ cdef class TrackableResource: def type_and_name(self): type_and_name = self.type if self.name: - type_and_name = f"{type_and_name}/{self.name}" + type_and_name = f"{type_and_name}{TRES_TYPE_DELIM}{self.name}" return type_and_name @@ -206,24 +285,22 @@ cdef merge_tres_str(char **tres_str, typ, val): cdef tres_ids_to_names(char *tres_str, TrackableResources tres_data): if not tres_str: - return {} + return None cdef: dict tdict = cstr.to_dict(tres_str) - dict out = {} + list out = [] if not tres_data: - return tdict + return None for tid, cnt in tdict.items(): if isinstance(tid, str) and tid.isdigit(): _tid = int(tid) if _tid in tres_data: - out[tres_data[_tid].type_and_name] = cnt - continue - - # If we can't find the TRES ID in our data, return it raw. 
- out[tid] = cnt + out.append( + (tres_data[_tid].type, tres_data[_tid].name, int(cnt)) + ) return out From 62299a5e91e3e3ea9145ca81977f2ecaa1d93f78 Mon Sep 17 00:00:00 2001 From: tazend Date: Sat, 10 Jun 2023 22:59:30 +0200 Subject: [PATCH 05/28] wip --- pyslurm/db/assoc.pxd | 12 +-- pyslurm/db/assoc.pyx | 169 +++++++++++++++++++++++-------------------- pyslurm/db/job.pyx | 137 ++++++++++++----------------------- pyslurm/db/qos.pxd | 4 +- pyslurm/db/qos.pyx | 86 +++++++++------------- pyslurm/db/tres.pxd | 4 +- pyslurm/db/tres.pyx | 39 ++++++---- 7 files changed, 202 insertions(+), 249 deletions(-) diff --git a/pyslurm/db/assoc.pxd b/pyslurm/db/assoc.pxd index 235ec5d5..bc7d3fe7 100644 --- a/pyslurm/db/assoc.pxd +++ b/pyslurm/db/assoc.pxd @@ -41,20 +41,21 @@ from pyslurm.db.util cimport ( qos_list_to_pylist, ) from pyslurm.db.tres cimport ( - find_tres_limit, - merge_tres_str, - tres_ids_to_names, + _set_tres_limits, TrackableResources, TrackableResourceLimits, ) from pyslurm.db.connection cimport Connection from pyslurm.utils cimport cstr from pyslurm.utils.uint cimport * -from pyslurm.db.qos cimport QualitiesOfService +from pyslurm.db.qos cimport QualitiesOfService, _set_qos_list + +cdef _parse_assoc_ptr(Association ass) +cdef _create_assoc_ptr(Association ass, conn=*) cdef class Associations(dict): - cdef SlurmList info + pass cdef class AssociationFilter: @@ -79,6 +80,7 @@ cdef class Association: max_tres_run_mins_per_user max_tres_per_job max_tres_per_node + qos @staticmethod cdef Association from_ptr(slurmdb_assoc_rec_t *in_ptr) diff --git a/pyslurm/db/assoc.pyx b/pyslurm/db/assoc.pyx index 951af4f0..321d208f 100644 --- a/pyslurm/db/assoc.pyx +++ b/pyslurm/db/assoc.pyx @@ -27,8 +27,8 @@ from pyslurm.utils.helpers import ( instance_to_dict, user_to_uid, ) -from pyslurm.db.tres import tres_names_to_ids from pyslurm.utils.uint import * +from pyslurm.db.connection import _open_conn_or_error cdef class Associations(dict): @@ -37,76 +37,78 @@ cdef class 
Associations(dict): pass @staticmethod - def load(AssociationFilter search_filter=None, - Connection db_connection=None): + def load(AssociationFilter db_filter=None, Connection db_connection=None): cdef: - Associations assoc_dict = Associations() + Associations out = Associations() Association assoc - AssociationFilter cond = search_filter + AssociationFilter cond = db_filter + SlurmList assoc_data SlurmListItem assoc_ptr - Connection conn = db_connection + Connection conn QualitiesOfService qos_data TrackableResources tres_data - if not search_filter: + # Prepare SQL Filter + if not db_filter: cond = AssociationFilter() cond._create() - if not conn: - conn = Connection.open() + # Setup DB Conn + conn = _open_conn_or_error(db_connection) - if not conn.is_open: - raise ValueError("Database connection is not open") - - assoc_dict.info = SlurmList.wrap( - slurmdb_associations_get(conn.ptr, cond.ptr)) + # Fetch Assoc Data + assoc_data = SlurmList.wrap(slurmdb_associations_get( + conn.ptr, cond.ptr)) - if assoc_dict.info.is_null: + if assoc_data.is_null: raise RPCError(msg="Failed to get Association data from slurmdbd") + # Fetch other necessary dependencies needed for translating some + # attributes (i.e QoS IDs to its name) qos_data = QualitiesOfService.load(name_is_key=False, db_connection=conn) tres_data = TrackableResources.load(name_is_key=False, db_connection=conn) - for assoc_ptr in SlurmList.iter_and_pop(assoc_dict.info): + # Setup Association objects + for assoc_ptr in SlurmList.iter_and_pop(assoc_data): assoc = Association.from_ptr(assoc_ptr.data) assoc.qos_data = qos_data assoc.tres_data = tres_data - assoc._parse_tres() - assoc_dict[assoc.id] = assoc + _parse_assoc_ptr(assoc) + out[assoc.id] = assoc - return assoc_dict + return out @staticmethod - def modify(assocs, Association changes, - Connection db_connection=None): + def modify(db_filter, Association changes, Connection db_connection=None): cdef: AssociationFilter afilter - Connection conn = 
db_connection + Connection conn SlurmList response SlurmListItem response_ptr list out = [] - if not conn: - conn = Connection.open() - - if not conn.is_open: - raise ValueError("Database connection is not open") - - if isinstance(assocs, Associations): - assoc_ids = list(assocs.keys()) + # Prepare SQL Filter + if isinstance(db_filter, Associations): + assoc_ids = list(db_filter.keys()) afilter = AssociationFilter(ids=assoc_ids) else: - afilter = assocs + afilter = db_filter + afilter._create() - # Check if TRES specified are actually valid. slurmdbd does not - # give an explicit error and just ignores invalid tres types. - changes._validate_tres() + # Setup DB conn + conn = _open_conn_or_error(db_connection) - afilter._create() - response = SlurmList.wrap( - slurmdb_associations_modify(conn.ptr, afilter.ptr, changes.ptr)) + # Any data that isn't parsed yet or needs validation is done in this + # function. + _create_assoc_ptr(changes, conn) + + # Modify associations, get the result + # This returns a List of char* with the associations that were + # modified + response = SlurmList.wrap(slurmdb_associations_modify( + conn.ptr, afilter.ptr, changes.ptr)) if not response.is_null and response.cnt: for response_ptr in response: @@ -195,23 +197,6 @@ cdef class Association: wrap.ptr = in_ptr return wrap - def _parse_tres(self): - cdef TrackableResources tres = self.tres_data - self.group_tres = TrackableResourceLimits.from_ids( - self.ptr.grp_tres, tres) - self.group_tres_mins = TrackableResourceLimits.from_ids( - self.ptr.grp_tres_mins, tres) - self.group_tres_run_mins = TrackableResourceLimits.from_ids( - self.ptr.grp_tres_mins, tres) - self.max_tres_mins_per_job = TrackableResourceLimits.from_ids( - self.ptr.max_tres_mins_pj, tres) - self.max_tres_run_mins_per_user = TrackableResourceLimits.from_ids( - self.ptr.max_tres_run_mins, tres) - self.max_tres_per_job = TrackableResourceLimits.from_ids( - self.ptr.max_tres_pj, tres) - self.max_tres_per_node = 
TrackableResourceLimits.from_ids( - self.ptr.max_tres_pn, tres) - def as_dict(self): """Database Association information formatted as a dictionary. @@ -220,23 +205,6 @@ cdef class Association: """ return instance_to_dict(self) - def _validate_tres(self): - self.tres_data = TrackableResources.load(name_is_key=False) - cstr.from_dict(&self.ptr.grp_tres, - self.group_tres._validate(self.tres_data)) - cstr.from_dict(&self.ptr.grp_tres_mins, - self.group_tres_mins._validate(self.tres_data)) - cstr.from_dict(&self.ptr.grp_tres_run_mins, - self.group_tres_run_mins._validate(self.tres_data)) - cstr.from_dict(&self.ptr.max_tres_mins_pj, - self.max_tres_mins_per_job._validate(self.tres_data)) - cstr.from_dict(&self.ptr.max_tres_run_mins, - self.max_tres_run_mins_per_user._validate(self.tres_data)) - cstr.from_dict(&self.ptr.max_tres_pj, - self.max_tres_per_job._validate(self.tres_data)) - cstr.from_dict(&self.ptr.max_tres_pn, - self.max_tres_per_node._validate(self.tres_data)) - @staticmethod def load(name): pass @@ -381,15 +349,6 @@ cdef class Association: def priority(self, val): self.ptr.priority = u32(val) - @property - def qos(self): - return qos_list_to_pylist(self.ptr.qos_list, self.qos_data) - - @qos.setter - def qos(self, val): - # TODO: must be ids - make_char_list(&self.ptr.qos_list, val) - @property def rgt(self): return self.ptr.rgt @@ -410,3 +369,53 @@ cdef class Association: def user(self, val): cstr.fmalloc(&self.ptr.user, val) + +cdef _parse_assoc_ptr(Association ass): + cdef: + TrackableResources tres = ass.tres_data + QualitiesOfService qos = ass.qos_data + + ass.group_tres = TrackableResourceLimits.from_ids( + ass.ptr.grp_tres, tres) + ass.group_tres_mins = TrackableResourceLimits.from_ids( + ass.ptr.grp_tres_mins, tres) + ass.group_tres_run_mins = TrackableResourceLimits.from_ids( + ass.ptr.grp_tres_mins, tres) + ass.max_tres_mins_per_job = TrackableResourceLimits.from_ids( + ass.ptr.max_tres_mins_pj, tres) + ass.max_tres_run_mins_per_user = 
TrackableResourceLimits.from_ids( + ass.ptr.max_tres_run_mins, tres) + ass.max_tres_per_job = TrackableResourceLimits.from_ids( + ass.ptr.max_tres_pj, tres) + ass.max_tres_per_node = TrackableResourceLimits.from_ids( + ass.ptr.max_tres_pn, tres) + ass.qos = qos_list_to_pylist(ass.ptr.qos_list, qos) + + +cdef _create_assoc_ptr(Association ass, conn=None): + # _set_tres_limits will also check if specified TRES are valid and + # translate them to its ID which is why we need to load the current TRES + # available in the system. + ass.tres_data = TrackableResources.load(name_is_key=False, + db_connection=conn) + _set_tres_limits(&ass.ptr.grp_tres, ass.group_tres, ass.tres_data) + _set_tres_limits(&ass.ptr.grp_tres_mins, ass.group_tres_mins, + ass.tres_data) + _set_tres_limits(&ass.ptr.grp_tres_run_mins, ass.group_tres_run_mins, + ass.tres_data) + _set_tres_limits(&ass.ptr.max_tres_mins_pj, ass.max_tres_mins_per_job, + ass.tres_data) + _set_tres_limits(&ass.ptr.max_tres_run_mins, ass.max_tres_run_mins_per_user, + ass.tres_data) + _set_tres_limits(&ass.ptr.max_tres_pj, ass.max_tres_per_job, + ass.tres_data) + _set_tres_limits(&ass.ptr.max_tres_pn, ass.max_tres_per_node, + ass.tres_data) + + # _set_qos_list will also check if specified QoS are valid and translate + # them to its ID, which is why we need to load the current QOS available + # in the system. 
+ ass.qos_data = QualitiesOfService.load(name_is_key=False, + db_connection=conn) + _set_qos_list(&ass.ptr.qos_list, self.qos, ass.qos_data) + diff --git a/pyslurm/db/job.pyx b/pyslurm/db/job.pyx index 636e1137..08f6c769 100644 --- a/pyslurm/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -27,7 +27,6 @@ from pyslurm.core.error import RPCError, PyslurmError from pyslurm.core import slurmctld from typing import Any from pyslurm.utils.uint import * -from pyslurm.db.cluster import LOCAL_CLUSTER from pyslurm.utils.ctime import ( date_to_timestamp, timestr_to_mins, @@ -40,14 +39,12 @@ from pyslurm.utils.helpers import ( uid_to_name, nodelist_to_range_str, instance_to_dict, - collection_to_dict, - group_collection_by_cluster, _get_exit_code, ) from pyslurm.db.connection import _open_conn_or_error -cdef class JobFilter: +cdef class JobSearchFilter: def __cinit__(self): self.ptr = NULL @@ -77,19 +74,14 @@ cdef class JobFilter: return None qos_id_list = [] - qos_data = QualitiesOfService.load() - for user_input in self.qos: - found = False - for qos in qos_data: - if (qos.id == user_input - or qos.name == user_input - or qos == user_input): - qos_id_list.append(str(qos.id)) - found = True - break - - if not found: - raise ValueError(f"QoS '{user_input}' does not exist") + qos = QualitiesOfService.load() + for q in self.qos: + if isinstance(q, int): + qos_id_list.append(q) + elif q in qos: + qos_id_list.append(str(qos[q].id)) + else: + raise ValueError(f"QoS {q} does not exist") return qos_id_list @@ -105,9 +97,11 @@ cdef class JobFilter: def _parse_clusters(self): if not self.clusters: + # Get the local cluster name # This is a requirement for some other parameters to function # correctly, like self.nodelist - return [LOCAL_CLUSTER] + slurm_conf = slurmctld.Config.load() + return [slurm_conf.cluster] elif self.clusters == "all": return None else: @@ -185,67 +179,29 @@ cdef class JobFilter: slurmdb_job_cond_def_start_end(ptr) -# Alias -JobSearchFilter = JobFilter - - -cdef class 
Jobs(list): +cdef class Jobs(dict): def __init__(self, jobs=None): - if isinstance(jobs, list): - for job in jobs: - if isinstance(job, int): - self.append(Job(job)) - else: - self.append(job) + if isinstance(jobs, dict): + self.update(jobs) elif isinstance(jobs, str): joblist = jobs.split(",") - self.extend([Job(job) for job in joblist]) - elif isinstance(jobs, dict): - self.extend([job for job in jobs.values()]) + self.update({int(job): Job(job) for job in joblist}) elif jobs is not None: - raise TypeError("Invalid Type: {type(jobs)}") - - def as_dict(self, recursive=False, group_by_cluster=False): - """Convert the collection data to a dict. - - Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. If - this is set to `True`, then additionally all objects are - converted to dicts. - group_by_cluster (bool, optional): - By default, only the Jobs from your local Cluster are - returned. If this is set to `True`, then all the Jobs in the - collection will be grouped by the Cluster - with the name of - the cluster as the key and the value being the collection as - another dict. - - Returns: - (dict): Collection as a dict. - """ - col = collection_to_dict(self, identifier=Job.id, recursive=recursive) - if not group_by_cluster: - return col.get(LOCAL_CLUSTER, {}) - - return col - - def group_by_cluster(self): - """Group Jobs by cluster name - - Returns: - (dict[str, Jobs]): Jobs grouped by cluster. - """ - return group_collection_by_cluster(self) + for job in jobs: + if isinstance(job, int): + self[job] = Job(job) + else: + self[job.name] = job @staticmethod - def load(JobFilter db_filter=None, Connection db_connection=None): + def load(JobSearchFilter db_filter=None, Connection db_connection=None): """Load Jobs from the Slurm Database Implements the slurmdb_jobs_get RPC. 
Args: - db_filter (pyslurm.db.JobFilter): + db_filter (pyslurm.db.JobSearchFilter): A search filter that the slurmdbd will apply when retrieving Jobs from the database. db_connection (pyslurm.db.Connection): @@ -270,21 +226,21 @@ cdef class Jobs(list): >>> import pyslurm >>> accounts = ["acc1", "acc2"] - >>> db_filter = pyslurm.db.JobFilter(accounts=accounts) + >>> db_filter = pyslurm.db.JobSearchFilter(accounts=accounts) >>> db_jobs = pyslurm.db.Jobs.load(db_filter) """ cdef: Jobs out = Jobs() Job job - JobFilter cond = db_filter + JobSearchFilter cond = db_filter SlurmList job_data SlurmListItem job_ptr Connection conn - dict qos_data + QualitiesOfService qos_data # Prepare SQL Filter if not db_filter: - cond = JobFilter() + cond = JobSearchFilter() cond._create() # Setup DB Conn @@ -297,8 +253,8 @@ cdef class Jobs(list): # Fetch other necessary dependencies needed for translating some # attributes (i.e QoS IDs to its name) - qos_data = QualitiesOfService.load(db_connection=conn).as_dict( - name_is_key=False) + qos_data = QualitiesOfService.load(name_is_key=False, + db_connection=conn) # TODO: also get trackable resources with slurmdb_tres_get and store # it in each job instance. tres_alloc_str and tres_req_str only @@ -313,7 +269,7 @@ cdef class Jobs(list): job.qos_data = qos_data job._create_steps() JobStatistics._sum_step_stats_for_job(job, job.steps) - out.append(job) + out[job.id] = job return out @@ -324,7 +280,7 @@ cdef class Jobs(list): Implements the slurm_job_modify RPC. Args: - db_filter (Union[pyslurm.db.JobFilter, pyslurm.db.Jobs]): + db_filter (Union[pyslurm.db.JobSearchFilter, pyslurm.db.Jobs]): A filter to decide which Jobs should be modified. 
changes (pyslurm.db.Job): Another [pyslurm.db.Job][] object that contains all the @@ -359,7 +315,7 @@ cdef class Jobs(list): >>> import pyslurm >>> - >>> db_filter = pyslurm.db.JobFilter(ids=[9999]) + >>> db_filter = pyslurm.db.JobSearchFilter(ids=[9999]) >>> changes = pyslurm.db.Job(comment="A comment for the job") >>> modified_jobs = pyslurm.db.Jobs.modify(db_filter, changes) >>> print(modified_jobs) @@ -373,7 +329,7 @@ cdef class Jobs(list): >>> import pyslurm >>> >>> db_conn = pyslurm.db.Connection.open() - >>> db_filter = pyslurm.db.JobFilter(ids=[9999]) + >>> db_filter = pyslurm.db.JobSearchFilter(ids=[9999]) >>> changes = pyslurm.db.Job(comment="A comment for the job") >>> modified_jobs = pyslurm.db.Jobs.modify( ... db_filter, changes, db_conn) @@ -386,7 +342,7 @@ cdef class Jobs(list): >>> db_conn.commit() """ cdef: - JobFilter cond + JobSearchFilter cond Connection conn SlurmList response SlurmListItem response_ptr @@ -394,10 +350,10 @@ cdef class Jobs(list): # Prepare SQL Filter if isinstance(db_filter, Jobs): - job_ids = [job.id for job in self] - cond = JobFilter(ids=job_ids) + job_ids = list(db_filter.keys()) + cond = JobSearchFilter(ids=job_ids) else: - cond = db_filter + cond = db_filter cond._create() # Setup DB Conn @@ -444,10 +400,9 @@ cdef class Job: def __cinit__(self): self.ptr = NULL - def __init__(self, job_id=0, cluster=LOCAL_CLUSTER, **kwargs): + def __init__(self, job_id=0, **kwargs): self._alloc_impl() self.ptr.jobid = int(job_id) - cstr.fmalloc(&self.ptr.cluster, cluster) for k, v in kwargs.items(): setattr(self, k, v) @@ -471,7 +426,7 @@ cdef class Job: return wrap @staticmethod - def load(job_id, cluster=LOCAL_CLUSTER, with_script=False, with_env=False): + def load(job_id, with_script=False, with_env=False): """Load the information for a specific Job from the Database. 
Args: @@ -498,15 +453,13 @@ cdef class Job: >>> print(db_job.script) """ - jfilter = JobFilter(ids=[int(job_id)], clusters=[cluster], - with_script=with_script, with_env=with_env) + jfilter = JobSearchFilter(ids=[int(job_id)], + with_script=with_script, with_env=with_env) jobs = Jobs.load(jfilter) - if not jobs: - raise RPCError(msg=f"Job {job_id} does not exist on " - f"Cluster {cluster}") + if not jobs or job_id not in jobs: + raise RPCError(msg=f"Job {job_id} does not exist") - # TODO: There might be multiple entries when job ids were reset. - return jobs[0] + return jobs[job_id] def _create_steps(self): cdef: @@ -559,7 +512,7 @@ cdef class Job: Raises: RPCError: When modifying the Job failed. """ - cdef JobFilter jfilter = JobFilter(ids=[self.id]) + cdef JobSearchFilter jfilter = JobSearchFilter(ids=[self.id]) Jobs.modify(jfilter, changes, db_connection) @property diff --git a/pyslurm/db/qos.pxd b/pyslurm/db/qos.pxd index 9cb3df86..c72378b1 100644 --- a/pyslurm/db/qos.pxd +++ b/pyslurm/db/qos.pxd @@ -44,11 +44,11 @@ from pyslurm.utils cimport cstr cdef _set_qos_list(List *in_list, vals, QualitiesOfService data) -cdef class QualitiesOfService(list): +cdef class QualitiesOfService(dict): pass -cdef class QualityOfServiceFilter: +cdef class QualityOfServiceSearchFilter: cdef slurmdb_qos_cond_t *ptr cdef public: diff --git a/pyslurm/db/qos.pyx b/pyslurm/db/qos.pyx index a01ef9b0..14c0c6b7 100644 --- a/pyslurm/db/qos.pyx +++ b/pyslurm/db/qos.pyx @@ -23,52 +23,50 @@ # cython: language_level=3 from pyslurm.core.error import RPCError -from pyslurm.utils.helpers import instance_to_dict, collection_to_dict_global +from pyslurm.utils.helpers import instance_to_dict from pyslurm.db.connection import _open_conn_or_error -cdef class QualitiesOfService(list): +def _qos_names_to_ids(qos_list, QualitiesOfService data): + cdef list out = [] + if not qos_list: + return None - def __init__(self): - pass + return [_validate_qos_single(qid, data) for qid in qos_list] - def 
as_dict(self, recursive=False, name_is_key=True): - """Convert the collection data to a dict. - Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. If - this is set to `True`, then additionally all objects are - converted to dicts. - name_is_key (bool, optional): - By default, the keys in this dict are the names of each QoS. - If this is set to `False`, then the unique ID of the QoS will - be used as dict keys. +def _validate_qos_single(qid, QualitiesOfService data): + for item in data.values(): + if qid == item.id or qid == item.name: + return item.id - Returns: - (dict): Collection as a dict. - """ - identifier = QualityOfService.name - if not name_is_key: - identifier = QualityOfService.id + raise ValueError(f"Invalid QOS specified: {qid}") - return collection_to_dict_global(self, identifier=identifier, - recursive=recursive) + +cdef _set_qos_list(List *in_list, vals, QualitiesOfService data): + qos_ids = _qos_names_to_ids(vals, data) + make_char_list(in_list, qos_ids) + + +cdef class QualitiesOfService(dict): + + def __init__(self): + pass @staticmethod - def load(QualityOfServiceFilter db_filter=None, - Connection db_connection=None): + def load(QualityOfServiceSearchFilter db_filter=None, + db_connection=None, name_is_key=True): cdef: QualitiesOfService out = QualitiesOfService() QualityOfService qos - QualityOfServiceFilter cond = db_filter + QualityOfServiceSearchFilter cond = db_filter SlurmList qos_data SlurmListItem qos_ptr Connection conn # Prepare SQL Filter if not db_filter: - cond = QualityOfServiceFilter() + cond = QualityOfServiceSearchFilter() cond._create() # Setup DB Conn @@ -83,12 +81,15 @@ cdef class QualitiesOfService(list): # Setup QOS objects for qos_ptr in SlurmList.iter_and_pop(qos_data): qos = QualityOfService.from_ptr(qos_ptr.data) - out.append(qos) + if name_is_key: + out[qos.name] = qos + else: + out[qos.id] = qos return out -cdef class QualityOfServiceFilter: +cdef class 
QualityOfServiceSearchFilter: def __cinit__(self): self.ptr = NULL @@ -194,12 +195,12 @@ cdef class QualityOfService: RPCError: If requesting the information from the database was not sucessful. """ - qfilter = QualityOfServiceFilter(names=[name]) + qfilter = QualityOfServiceSearchFilter(names=[name]) qos_data = QualitiesOfService.load(qfilter) - if not qos_data: + if not qos_data or name not in qos_data: raise RPCError(msg=f"QualityOfService {name} does not exist") - return qos_data[0] + return qos_data[name] @property def name(self): @@ -216,24 +217,3 @@ cdef class QualityOfService: @property def id(self): return self.ptr.id - - -def _qos_names_to_ids(qos_list, QualitiesOfService data): - cdef list out = [] - if not qos_list: - return None - - return [_validate_qos_single(qid, data) for qid in qos_list] - - -def _validate_qos_single(qid, QualitiesOfService data): - for item in data: - if qid == item.id or qid == item.name: - return item.id - - raise ValueError(f"Invalid QOS specified: {qid}") - - -cdef _set_qos_list(List *in_list, vals, QualitiesOfService data): - qos_ids = _qos_names_to_ids(vals, data) - make_char_list(in_list, qos_ids) diff --git a/pyslurm/db/tres.pxd b/pyslurm/db/tres.pxd index a37fd9bf..23b44ad2 100644 --- a/pyslurm/db/tres.pxd +++ b/pyslurm/db/tres.pxd @@ -42,7 +42,9 @@ from pyslurm.db.connection cimport Connection cdef find_tres_count(char *tres_str, typ, on_noval=*, on_inf=*) cdef find_tres_limit(char *tres_str, typ) cdef merge_tres_str(char **tres_str, typ, val) -cdef tres_ids_to_names(char *tres_str, TrackableResources tres_data) +cdef _tres_ids_to_names(char *tres_str, TrackableResources tres_data) +cdef _set_tres_limits(char **dest, TrackableResourceLimits src, + TrackableResources tres_data) cdef class TrackableResourceLimits: diff --git a/pyslurm/db/tres.pyx b/pyslurm/db/tres.pyx index 4ba94792..b9a364d7 100644 --- a/pyslurm/db/tres.pyx +++ b/pyslurm/db/tres.pyx @@ -27,6 +27,7 @@ from pyslurm.constants import UNLIMITED from 
pyslurm.core.error import RPCError from pyslurm.utils.helpers import instance_to_dict from pyslurm.utils import cstr +from pyslurm.db.connection import _open_conn_or_error import json @@ -56,7 +57,7 @@ cdef class TrackableResourceLimits: @staticmethod cdef from_ids(char *tres_id_str, TrackableResources tres_data): - tres_list = tres_ids_to_names(tres_id_str, tres_data) + tres_list = _tres_ids_to_names(tres_id_str, tres_data) if not tres_list: return None @@ -75,7 +76,7 @@ cdef class TrackableResourceLimits: return out def _validate(self, TrackableResources tres_data): - id_dict = tres_names_to_ids(self.as_dict(flatten_limits=True), + id_dict = _tres_names_to_ids(self.as_dict(flatten_limits=True), tres_data) return id_dict @@ -143,24 +144,25 @@ cdef class TrackableResources(dict): cdef: TrackableResources out = TrackableResources() TrackableResource tres - Connection conn = db_connection - SlurmList tres_list + Connection conn + SlurmList tres_data SlurmListItem tres_ptr TrackableResourceFilter db_filter = TrackableResourceFilter() - if not conn: - conn = Connection.open() + # Prepare SQL Filter + db_filter._create() - if not conn.is_open: - raise ValueError("Database connection is not open") + # Setup DB Conn + conn = _open_conn_or_error(db_connection) - db_filter._create() - tres_list = SlurmList.wrap(slurmdb_tres_get(conn.ptr, db_filter.ptr)) + # Fetch TRES data + tres_data = SlurmList.wrap(slurmdb_tres_get(conn.ptr, db_filter.ptr)) - if tres_list.is_null: + if tres_data.is_null: raise RPCError(msg="Failed to get TRES data from slurmdbd") - for tres_ptr in SlurmList.iter_and_pop(tres_list): + # Setup TRES objects + for tres_ptr in SlurmList.iter_and_pop(tres_data): tres = TrackableResource.from_ptr( tres_ptr.data) @@ -283,7 +285,7 @@ cdef merge_tres_str(char **tres_str, typ, val): cstr.from_dict(tres_str, current) -cdef tres_ids_to_names(char *tres_str, TrackableResources tres_data): +cdef _tres_ids_to_names(char *tres_str, TrackableResources tres_data): if not 
tres_str: return None @@ -305,21 +307,26 @@ cdef tres_ids_to_names(char *tres_str, TrackableResources tres_data): return out -def tres_names_to_ids(dict tres_dict, TrackableResources tres_data): +def _tres_names_to_ids(dict tres_dict, TrackableResources tres_data): cdef dict out = {} if not tres_dict: return out for tid, cnt in tres_dict.items(): - real_id = validate_tres_single(tid, tres_data) + real_id = _validate_tres_single(tid, tres_data) out[real_id] = cnt return out -def validate_tres_single(tid, TrackableResources tres_data): +def _validate_tres_single(tid, TrackableResources tres_data): for tres in tres_data.values(): if tid == tres.id or tid == tres.type_and_name: return tres.id raise ValueError(f"Invalid TRES specified: {tid}") + + +cdef _set_tres_limits(char **dest, TrackableResourceLimits src, + TrackableResources tres_data): + cstr.from_dict(dest, src._validate(tres_data)) From 929c0c4e646d39645480fb18c12505dbbc9d9742 Mon Sep 17 00:00:00 2001 From: tazend Date: Sat, 10 Jun 2023 23:18:08 +0200 Subject: [PATCH 06/28] wip --- pyslurm/db/__init__.py | 3 ++- pyslurm/db/job.pyx | 32 ++++++++++++++++++-------------- pyslurm/db/qos.pxd | 2 +- pyslurm/db/qos.pyx | 10 +++++----- 4 files changed, 26 insertions(+), 21 deletions(-) diff --git a/pyslurm/db/__init__.py b/pyslurm/db/__init__.py index 377fec6e..acd36a40 100644 --- a/pyslurm/db/__init__.py +++ b/pyslurm/db/__init__.py @@ -25,6 +25,7 @@ from .job import ( Job, Jobs, + JobFilter, JobSearchFilter, ) from .tres import ( @@ -34,7 +35,7 @@ from .qos import ( QualitiesOfService, QualityOfService, - QualityOfServiceSearchFilter, + QualityOfServiceFilter, ) from .assoc import ( Associations, diff --git a/pyslurm/db/job.pyx b/pyslurm/db/job.pyx index 08f6c769..035d3c6e 100644 --- a/pyslurm/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -44,7 +44,7 @@ from pyslurm.utils.helpers import ( from pyslurm.db.connection import _open_conn_or_error -cdef class JobSearchFilter: +cdef class JobFilter: def __cinit__(self): self.ptr 
= NULL @@ -179,6 +179,10 @@ cdef class JobSearchFilter: slurmdb_job_cond_def_start_end(ptr) +# Alias +JobSearchFilter = JobFilter + + cdef class Jobs(dict): def __init__(self, jobs=None): @@ -195,13 +199,13 @@ cdef class Jobs(dict): self[job.name] = job @staticmethod - def load(JobSearchFilter db_filter=None, Connection db_connection=None): + def load(JobFilter db_filter=None, Connection db_connection=None): """Load Jobs from the Slurm Database Implements the slurmdb_jobs_get RPC. Args: - db_filter (pyslurm.db.JobSearchFilter): + db_filter (pyslurm.db.JobFilter): A search filter that the slurmdbd will apply when retrieving Jobs from the database. db_connection (pyslurm.db.Connection): @@ -226,13 +230,13 @@ cdef class Jobs(dict): >>> import pyslurm >>> accounts = ["acc1", "acc2"] - >>> db_filter = pyslurm.db.JobSearchFilter(accounts=accounts) + >>> db_filter = pyslurm.db.JobFilter(accounts=accounts) >>> db_jobs = pyslurm.db.Jobs.load(db_filter) """ cdef: Jobs out = Jobs() Job job - JobSearchFilter cond = db_filter + JobFilter cond = db_filter SlurmList job_data SlurmListItem job_ptr Connection conn @@ -240,7 +244,7 @@ cdef class Jobs(dict): # Prepare SQL Filter if not db_filter: - cond = JobSearchFilter() + cond = JobFilter() cond._create() # Setup DB Conn @@ -280,7 +284,7 @@ cdef class Jobs(dict): Implements the slurm_job_modify RPC. Args: - db_filter (Union[pyslurm.db.JobSearchFilter, pyslurm.db.Jobs]): + db_filter (Union[pyslurm.db.JobFilter, pyslurm.db.Jobs]): A filter to decide which Jobs should be modified. 
changes (pyslurm.db.Job): Another [pyslurm.db.Job][] object that contains all the @@ -315,7 +319,7 @@ cdef class Jobs(dict): >>> import pyslurm >>> - >>> db_filter = pyslurm.db.JobSearchFilter(ids=[9999]) + >>> db_filter = pyslurm.db.JobFilter(ids=[9999]) >>> changes = pyslurm.db.Job(comment="A comment for the job") >>> modified_jobs = pyslurm.db.Jobs.modify(db_filter, changes) >>> print(modified_jobs) @@ -329,7 +333,7 @@ cdef class Jobs(dict): >>> import pyslurm >>> >>> db_conn = pyslurm.db.Connection.open() - >>> db_filter = pyslurm.db.JobSearchFilter(ids=[9999]) + >>> db_filter = pyslurm.db.JobFilter(ids=[9999]) >>> changes = pyslurm.db.Job(comment="A comment for the job") >>> modified_jobs = pyslurm.db.Jobs.modify( ... db_filter, changes, db_conn) @@ -342,7 +346,7 @@ cdef class Jobs(dict): >>> db_conn.commit() """ cdef: - JobSearchFilter cond + JobFilter cond Connection conn SlurmList response SlurmListItem response_ptr @@ -351,9 +355,9 @@ cdef class Jobs(dict): # Prepare SQL Filter if isinstance(db_filter, Jobs): job_ids = list(db_filter.keys()) - cond = JobSearchFilter(ids=job_ids) + cond = JobFilter(ids=job_ids) else: - cond = db_filter + cond = db_filter cond._create() # Setup DB Conn @@ -453,7 +457,7 @@ cdef class Job: >>> print(db_job.script) """ - jfilter = JobSearchFilter(ids=[int(job_id)], + jfilter = JobFilter(ids=[int(job_id)], with_script=with_script, with_env=with_env) jobs = Jobs.load(jfilter) if not jobs or job_id not in jobs: @@ -512,7 +516,7 @@ cdef class Job: Raises: RPCError: When modifying the Job failed. 
""" - cdef JobSearchFilter jfilter = JobSearchFilter(ids=[self.id]) + cdef JobFilter jfilter = JobFilter(ids=[self.id]) Jobs.modify(jfilter, changes, db_connection) @property diff --git a/pyslurm/db/qos.pxd b/pyslurm/db/qos.pxd index c72378b1..ea0fde2d 100644 --- a/pyslurm/db/qos.pxd +++ b/pyslurm/db/qos.pxd @@ -48,7 +48,7 @@ cdef class QualitiesOfService(dict): pass -cdef class QualityOfServiceSearchFilter: +cdef class QualityOfServiceFilter: cdef slurmdb_qos_cond_t *ptr cdef public: diff --git a/pyslurm/db/qos.pyx b/pyslurm/db/qos.pyx index 14c0c6b7..2e3074c4 100644 --- a/pyslurm/db/qos.pyx +++ b/pyslurm/db/qos.pyx @@ -54,19 +54,19 @@ cdef class QualitiesOfService(dict): pass @staticmethod - def load(QualityOfServiceSearchFilter db_filter=None, + def load(QualityOfServiceFilter db_filter=None, db_connection=None, name_is_key=True): cdef: QualitiesOfService out = QualitiesOfService() QualityOfService qos - QualityOfServiceSearchFilter cond = db_filter + QualityOfServiceFilter cond = db_filter SlurmList qos_data SlurmListItem qos_ptr Connection conn # Prepare SQL Filter if not db_filter: - cond = QualityOfServiceSearchFilter() + cond = QualityOfServiceFilter() cond._create() # Setup DB Conn @@ -89,7 +89,7 @@ cdef class QualitiesOfService(dict): return out -cdef class QualityOfServiceSearchFilter: +cdef class QualityOfServiceFilter: def __cinit__(self): self.ptr = NULL @@ -195,7 +195,7 @@ cdef class QualityOfService: RPCError: If requesting the information from the database was not sucessful. 
""" - qfilter = QualityOfServiceSearchFilter(names=[name]) + qfilter = QualityOfServiceFilter(names=[name]) qos_data = QualitiesOfService.load(qfilter) if not qos_data or name not in qos_data: raise RPCError(msg=f"QualityOfService {name} does not exist") From 292a70ec045d83aca8d029cda77926263644d755 Mon Sep 17 00:00:00 2001 From: tazend Date: Fri, 16 Jun 2023 21:07:04 +0200 Subject: [PATCH 07/28] wip --- pyslurm/db/assoc.pxd | 2 +- pyslurm/db/assoc.pyx | 24 ++++++++++++++---------- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/pyslurm/db/assoc.pxd b/pyslurm/db/assoc.pxd index bc7d3fe7..ee889467 100644 --- a/pyslurm/db/assoc.pxd +++ b/pyslurm/db/assoc.pxd @@ -54,7 +54,7 @@ cdef _parse_assoc_ptr(Association ass) cdef _create_assoc_ptr(Association ass, conn=*) -cdef class Associations(dict): +cdef class Associations(list): pass diff --git a/pyslurm/db/assoc.pyx b/pyslurm/db/assoc.pyx index 321d208f..f85a2d8a 100644 --- a/pyslurm/db/assoc.pyx +++ b/pyslurm/db/assoc.pyx @@ -31,7 +31,7 @@ from pyslurm.utils.uint import * from pyslurm.db.connection import _open_conn_or_error -cdef class Associations(dict): +cdef class Associations(list): def __init__(self): pass @@ -76,7 +76,7 @@ cdef class Associations(dict): assoc.qos_data = qos_data assoc.tres_data = tres_data _parse_assoc_ptr(assoc) - out[assoc.id] = assoc + out.append(assoc) return out @@ -91,7 +91,7 @@ cdef class Associations(dict): # Prepare SQL Filter if isinstance(db_filter, Associations): - assoc_ids = list(db_filter.keys()) + assoc_ids = [ass.id for ass in db_filter] afilter = AssociationFilter(ids=assoc_ids) else: afilter = db_filter @@ -172,8 +172,11 @@ cdef class Association: def __cinit__(self): self.ptr = NULL - def __init__(self): + def __init__(self, **kwargs): self._alloc_impl() + self.id = 0 + for k, v in kwargs.items(): + setattr(self, k, v) def __dealloc__(self): self._dealloc_impl() @@ -205,9 +208,10 @@ cdef class Association: """ return instance_to_dict(self) - @staticmethod - def 
load(name): - pass + def __eq__(self, other): + if isinstance(other, Association): + return self.id == other.id and self.cluster == other.cluster + return NotImplemented @property def account(self): @@ -271,7 +275,7 @@ cdef class Association: @property def id(self): - return self.ptr.id + return u32_parse(self.ptr.id) @id.setter def id(self, val): @@ -283,7 +287,7 @@ cdef class Association: @property def lft(self): - return self.ptr.lft + return u32_parse(self.ptr.lft) @property def max_jobs(self): @@ -351,7 +355,7 @@ cdef class Association: @property def rgt(self): - return self.ptr.rgt + return u32_parse(self.ptr.rgt) @property def shares(self): From 4bbbf7c943dd828afecded31a31ddf4f4260936b Mon Sep 17 00:00:00 2001 From: Toni Harzendorf Date: Sun, 18 Jun 2023 19:46:39 +0200 Subject: [PATCH 08/28] wip collections base list type --- pyslurm/core/partition.pyx | 43 +++++++++--------------------- pyslurm/db/assoc.pxd | 4 +-- pyslurm/db/assoc.pyx | 30 ++++++++++++--------- pyslurm/db/job.pyx | 26 +++++++++++------- pyslurm/db/qos.pxd | 2 +- pyslurm/db/qos.pyx | 21 ++++++++------- pyslurm/db/tres.pxd | 2 +- pyslurm/db/tres.pyx | 17 +++++++----- pyslurm/slurm/extra.pxi | 3 +++ pyslurm/utils/helpers.pyx | 54 ++++++++------------------------------ 10 files changed, 87 insertions(+), 115 deletions(-) diff --git a/pyslurm/core/partition.pyx b/pyslurm/core/partition.pyx index 56375d33..68459c76 100644 --- a/pyslurm/core/partition.pyx +++ b/pyslurm/core/partition.pyx @@ -30,7 +30,6 @@ from pyslurm.utils.uint import * from pyslurm.core.error import RPCError, verify_rpc from pyslurm.utils.ctime import timestamp_to_date, _raw_time from pyslurm.constants import UNLIMITED -from pyslurm.db.cluster import LOCAL_CLUSTER from pyslurm.utils.helpers import ( uid_to_name, gid_to_name, @@ -58,35 +57,20 @@ cdef class Partitions(list): self.info = NULL def __init__(self, partitions=None): - if isinstance(partitions, list): - for part in partitions: - if isinstance(part, str): - 
self.append(Partition(part)) - else: - self.append(part) + if isinstance(partitions, dict): + self.update(partitions) elif isinstance(partitions, str): partlist = partitions.split(",") - self.extend([Partition(part) for part in partlist]) - elif isinstance(partitions, dict): - self.extend([part for part in partitions.values()]) + self.update({part: Partition(part) for part in partlist}) elif partitions is not None: - raise TypeError("Invalid Type: {type(partitions)}") - - def as_dict(self, recursive=False): - """Convert the collection data to a dict. - - Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. If - this is set to `True`, then additionally all objects are - converted to dicts. + for part in partitions: + if isinstance(part, str): + self[part] = Partition(part) + else: + self[part.name] = part - Returns: - (dict): Collection as a dict. - """ - col = collection_to_dict(self, identifier=Partition.name, - recursive=recursive) - return col.get(LOCAL_CLUSTER, {}) + def as_dict(self): + return collection_to_dict(self, False, False, Partition.name) def group_by_cluster(self): return group_collection_by_cluster(self) @@ -128,6 +112,7 @@ cdef class Partitions(list): partition.power_save_enabled = power_save_enabled partition.slurm_conf = slurm_conf + partition.cluster = slurm_conf.cluster partitions.append(partition) # At this point we memcpy'd all the memory for the Partitions. Setting @@ -160,7 +145,7 @@ cdef class Partitions(list): return self reloaded_parts = Partitions.load().as_dict() - for idx, part in enumerate(self): + for part, idx in enumerate(self): part_name = part.name if part_name in reloaded_parts: # Put the new data in. 
@@ -209,7 +194,6 @@ cdef class Partition: def __init__(self, name=None, **kwargs): self._alloc_impl() self.name = name - self.cluster = LOCAL_CLUSTER for k, v in kwargs.items(): setattr(self, k, v) @@ -232,7 +216,6 @@ cdef class Partition: cdef Partition from_ptr(partition_info_t *in_ptr): cdef Partition wrap = Partition.__new__(Partition) wrap._alloc_impl() - wrap.cluster = LOCAL_CLUSTER memcpy(wrap.ptr, in_ptr, sizeof(partition_info_t)) return wrap @@ -274,7 +257,7 @@ cdef class Partition: >>> import pyslurm >>> part = pyslurm.Partition.load("normal") """ - partitions = Partitions.load().as_dict() + partitions = Partitions.load() if name not in partitions: raise RPCError(msg=f"Partition '{name}' doesn't exist") diff --git a/pyslurm/db/assoc.pxd b/pyslurm/db/assoc.pxd index ee889467..12a0cde1 100644 --- a/pyslurm/db/assoc.pxd +++ b/pyslurm/db/assoc.pxd @@ -69,8 +69,8 @@ cdef class AssociationFilter: cdef class Association: cdef: slurmdb_assoc_rec_t *ptr - QualitiesOfService qos_data - TrackableResources tres_data + dict qos_data + dict tres_data cdef public: group_tres diff --git a/pyslurm/db/assoc.pyx b/pyslurm/db/assoc.pyx index f85a2d8a..8697e4ef 100644 --- a/pyslurm/db/assoc.pyx +++ b/pyslurm/db/assoc.pyx @@ -25,6 +25,8 @@ from pyslurm.core.error import RPCError from pyslurm.utils.helpers import ( instance_to_dict, + collection_to_dict, + group_collection_by_cluster, user_to_uid, ) from pyslurm.utils.uint import * @@ -36,6 +38,12 @@ cdef class Associations(list): def __init__(self): pass + def as_dict(self, group_by_cluster=False): + return collection_to_dict(self, group_by_cluster, True, Association.id) + + def group_by_cluster(self): + return group_collection_by_cluster(self) + @staticmethod def load(AssociationFilter db_filter=None, Connection db_connection=None): cdef: @@ -45,8 +53,8 @@ cdef class Associations(list): SlurmList assoc_data SlurmListItem assoc_ptr Connection conn - QualitiesOfService qos_data - TrackableResources tres_data + dict qos_data + 
dict tres_data # Prepare SQL Filter if not db_filter: @@ -65,10 +73,10 @@ cdef class Associations(list): # Fetch other necessary dependencies needed for translating some # attributes (i.e QoS IDs to its name) - qos_data = QualitiesOfService.load(name_is_key=False, - db_connection=conn) - tres_data = TrackableResources.load(name_is_key=False, - db_connection=conn) + qos_data = QualitiesOfService.load(db_connection=conn).as_dict( + name_is_key=False) + tres_data = TrackableResources.load(db_connection=conn).as_dict( + name_is_key=False) # Setup Association objects for assoc_ptr in SlurmList.iter_and_pop(assoc_data): @@ -376,8 +384,8 @@ cdef class Association: cdef _parse_assoc_ptr(Association ass): cdef: - TrackableResources tres = ass.tres_data - QualitiesOfService qos = ass.qos_data + dict tres = ass.tres_data + dict qos = ass.qos_data ass.group_tres = TrackableResourceLimits.from_ids( ass.ptr.grp_tres, tres) @@ -400,8 +408,7 @@ cdef _create_assoc_ptr(Association ass, conn=None): # _set_tres_limits will also check if specified TRES are valid and # translate them to its ID which is why we need to load the current TRES # available in the system. - ass.tres_data = TrackableResources.load(name_is_key=False, - db_connection=conn) + ass.tres_data = TrackableResources.load(db_connection=conn) _set_tres_limits(&ass.ptr.grp_tres, ass.group_tres, ass.tres_data) _set_tres_limits(&ass.ptr.grp_tres_mins, ass.group_tres_mins, ass.tres_data) @@ -419,7 +426,6 @@ cdef _create_assoc_ptr(Association ass, conn=None): # _set_qos_list will also check if specified QoS are valid and translate # them to its ID, which is why we need to load the current QOS available # in the system. 
- ass.qos_data = QualitiesOfService.load(name_is_key=False, - db_connection=conn) + ass.qos_data = QualitiesOfService.load(db_connection=conn) _set_qos_list(&ass.ptr.qos_list, self.qos, ass.qos_data) diff --git a/pyslurm/db/job.pyx b/pyslurm/db/job.pyx index 035d3c6e..f4996b3d 100644 --- a/pyslurm/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -39,6 +39,7 @@ from pyslurm.utils.helpers import ( uid_to_name, nodelist_to_range_str, instance_to_dict, + collection_to_dict, _get_exit_code, ) from pyslurm.db.connection import _open_conn_or_error @@ -183,20 +184,25 @@ cdef class JobFilter: JobSearchFilter = JobFilter -cdef class Jobs(dict): +cdef class Jobs(list): def __init__(self, jobs=None): - if isinstance(jobs, dict): - self.update(jobs) - elif isinstance(jobs, str): - joblist = jobs.split(",") - self.update({int(job): Job(job) for job in joblist}) - elif jobs is not None: + if isinstance(jobs, list): for job in jobs: if isinstance(job, int): - self[job] = Job(job) + self.extend(Job(job)) else: - self[job.name] = job + self.extend(job) + elif isinstance(jobs, str): + joblist = jobs.split(",") + self.extend([Job(job) for job in joblist]) + elif isinstance(jobs, dict): + self.extend([job for job in jobs.values()]) + elif jobs is not None: + raise TypeError("Invalid Type: {type(jobs)}") + + def as_dict(self, by_cluster=False): + return collection_to_dict(self, by_cluster) @staticmethod def load(JobFilter db_filter=None, Connection db_connection=None): @@ -273,7 +279,7 @@ cdef class Jobs(dict): job.qos_data = qos_data job._create_steps() JobStatistics._sum_step_stats_for_job(job, job.steps) - out[job.id] = job + out.append(job) return out diff --git a/pyslurm/db/qos.pxd b/pyslurm/db/qos.pxd index ea0fde2d..9cb3df86 100644 --- a/pyslurm/db/qos.pxd +++ b/pyslurm/db/qos.pxd @@ -44,7 +44,7 @@ from pyslurm.utils cimport cstr cdef _set_qos_list(List *in_list, vals, QualitiesOfService data) -cdef class QualitiesOfService(dict): +cdef class QualitiesOfService(list): pass diff --git 
a/pyslurm/db/qos.pyx b/pyslurm/db/qos.pyx index 2e3074c4..303bc30a 100644 --- a/pyslurm/db/qos.pyx +++ b/pyslurm/db/qos.pyx @@ -23,7 +23,7 @@ # cython: language_level=3 from pyslurm.core.error import RPCError -from pyslurm.utils.helpers import instance_to_dict +from pyslurm.utils.helpers import instance_to_dict, collection_to_dict from pyslurm.db.connection import _open_conn_or_error @@ -36,7 +36,7 @@ def _qos_names_to_ids(qos_list, QualitiesOfService data): def _validate_qos_single(qid, QualitiesOfService data): - for item in data.values(): + for item in data: if qid == item.id or qid == item.name: return item.id @@ -48,14 +48,20 @@ cdef _set_qos_list(List *in_list, vals, QualitiesOfService data): make_char_list(in_list, qos_ids) -cdef class QualitiesOfService(dict): +cdef class QualitiesOfService(list): def __init__(self): pass + def as_dict(self, name_is_key=True): + identifier = QualityOfService.name + if not name_is_key: + identifier = QualityOfService.id + + return collection_to_dict(self, False, True, identifier) + @staticmethod - def load(QualityOfServiceFilter db_filter=None, - db_connection=None, name_is_key=True): + def load(QualityOfServiceFilter db_filter=None, db_connection=None): cdef: QualitiesOfService out = QualitiesOfService() QualityOfService qos @@ -81,10 +87,7 @@ cdef class QualitiesOfService(dict): # Setup QOS objects for qos_ptr in SlurmList.iter_and_pop(qos_data): qos = QualityOfService.from_ptr(qos_ptr.data) - if name_is_key: - out[qos.name] = qos - else: - out[qos.id] = qos + out.append(qos) return out diff --git a/pyslurm/db/tres.pxd b/pyslurm/db/tres.pxd index 23b44ad2..ef1568f6 100644 --- a/pyslurm/db/tres.pxd +++ b/pyslurm/db/tres.pxd @@ -69,7 +69,7 @@ cdef class TrackableResourceFilter: cdef slurmdb_tres_cond_t *ptr -cdef class TrackableResources(dict): +cdef class TrackableResources(list): cdef public raw_str @staticmethod diff --git a/pyslurm/db/tres.pyx b/pyslurm/db/tres.pyx index b9a364d7..d9d270b5 100644 --- 
a/pyslurm/db/tres.pyx +++ b/pyslurm/db/tres.pyx @@ -134,11 +134,18 @@ cdef class TrackableResourceFilter: self._alloc() -cdef class TrackableResources(dict): +cdef class TrackableResources(list): def __init__(self): pass + def as_dict(self, name_is_key=True): + identifier = TrackableResource.type_and_name + if not name_is_key: + identifier = TrackableResource.id + + return collection_to_dict(self, False, True, identifier) + @staticmethod def load(Connection db_connection=None, name_is_key=True): cdef: @@ -165,11 +172,7 @@ cdef class TrackableResources(dict): for tres_ptr in SlurmList.iter_and_pop(tres_data): tres = TrackableResource.from_ptr( tres_ptr.data) - - if name_is_key and tres.type: - out[tres.type_and_name] = tres - else: - out[tres.id] = tres + out.append(tres) return out @@ -320,7 +323,7 @@ def _tres_names_to_ids(dict tres_dict, TrackableResources tres_data): def _validate_tres_single(tid, TrackableResources tres_data): - for tres in tres_data.values(): + for tres in tres_data: if tid == tres.id or tid == tres.type_and_name: return tres.id diff --git a/pyslurm/slurm/extra.pxi b/pyslurm/slurm/extra.pxi index a15afd52..3557b0b9 100644 --- a/pyslurm/slurm/extra.pxi +++ b/pyslurm/slurm/extra.pxi @@ -165,6 +165,9 @@ ctypedef enum tres_types_t: # Global Environment cdef extern char **environ +# Local slurm config +cdef extern slurm_conf_t slurm_conf + # # Slurm Memory routines # We simply use the macros from xmalloc.h - more convenient diff --git a/pyslurm/utils/helpers.pyx b/pyslurm/utils/helpers.pyx index fb1d2201..dbd81d8e 100644 --- a/pyslurm/utils/helpers.pyx +++ b/pyslurm/utils/helpers.pyx @@ -341,33 +341,27 @@ def instance_to_dict(inst): return out -def collection_to_dict(collection, identifier, recursive=False, group_id=None): +def collection_to_dict(collection, by_cluster, is_global_data, identifier): cdef dict out = {} + if is_global_data: + for item in collection: + _id = identifier.__get__(item) + out[_id] = item + return out + for item in 
collection: cluster = item.cluster if cluster not in out: out[cluster] = {} _id = identifier.__get__(item) - data = item if not recursive else item.as_dict() - - if group_id: - grp_id = group_id.__get__(item) - if grp_id not in out[cluster]: - out[cluster][grp_id] = {} - out[cluster][grp_id].update({_id: data}) - else: - out[cluster][_id] = data - - return out + out[cluster][_id] = item + if not by_cluster: + # TODO: Return only local cluster data + return out -def collection_to_dict_global(collection, identifier, recursive=False): - cdef dict out = {} - for item in collection: - _id = identifier.__get__(item) - out[_id] = item if not recursive else item.as_dict() return out @@ -406,29 +400,3 @@ def _get_exit_code(exit_code): exit_state -= 128 return exit_state, sig - - -def humanize_step_id(sid): - if sid == slurm.SLURM_BATCH_SCRIPT: - return "batch" - elif sid == slurm.SLURM_EXTERN_CONT: - return "extern" - elif sid == slurm.SLURM_INTERACTIVE_STEP: - return "interactive" - elif sid == slurm.SLURM_PENDING_STEP: - return "pending" - else: - return sid - - -def dehumanize_step_id(sid): - if sid == "batch": - return slurm.SLURM_BATCH_SCRIPT - elif sid == "extern": - return slurm.SLURM_EXTERN_CONT - elif sid == "interactive": - return slurm.SLURM_INTERACTIVE_STEP - elif sid == "pending": - return slurm.SLURM_PENDING_STEP - else: - return int(sid) From 35cbcfd1130213ff5a4fcba6210510294adcb964 Mon Sep 17 00:00:00 2001 From: tazend Date: Mon, 19 Jun 2023 21:44:06 +0200 Subject: [PATCH 09/28] wip --- pyslurm/core/job/job.pyx | 38 +++++++++++--------------------------- pyslurm/core/node.pyx | 26 +++++--------------------- pyslurm/core/partition.pyx | 18 ++++++++++-------- pyslurm/db/job.pyx | 35 ++++++++++++++++++++++------------- 4 files changed, 48 insertions(+), 69 deletions(-) diff --git a/pyslurm/core/job/job.pyx b/pyslurm/core/job/job.pyx index 2c33d581..f42d7f5d 100644 --- a/pyslurm/core/job/job.pyx +++ b/pyslurm/core/job/job.pyx @@ -34,7 +34,6 @@ from typing import 
Union from pyslurm.utils import cstr, ctime from pyslurm.utils.uint import * from pyslurm.core.job.util import * -from pyslurm.db.cluster import LOCAL_CLUSTER from pyslurm.core.error import ( RPCError, verify_rpc, @@ -69,9 +68,9 @@ cdef class Jobs(list): if isinstance(jobs, list): for job in jobs: if isinstance(job, int): - self.append(Job(job)) + self.extend(Job(job)) else: - self.append(job) + self.extend(job) elif isinstance(jobs, str): joblist = jobs.split(",") self.extend([Job(int(job)) for job in joblist]) @@ -80,20 +79,8 @@ cdef class Jobs(list): elif jobs is not None: raise TypeError("Invalid Type: {type(jobs)}") - def as_dict(self, recursive=False): - """Convert the collection data to a dict. - - Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. If - this is set to `True`, then additionally all objects are - converted to dicts. - - Returns: - (dict): Collection as a dict. - """ - col = collection_to_dict(self, identifier=Job.id, recursive=recursive) - return col.get(LOCAL_CLUSTER, {}) + def as_dict(self): + return collection_to_dict(self, False, False, Job.id) def group_by_cluster(self): return group_collection_by_cluster(self) @@ -178,7 +165,7 @@ cdef class Jobs(list): return self reloaded_jobs = Jobs.load().as_dict() - for idx, jid in enumerate(self): + for jid, idx in enumerate(self): if jid in reloaded_jobs: # Put the new data in. new_jobs.append(reloaded_jobs[jid]) @@ -196,7 +183,7 @@ cdef class Jobs(list): def load_steps(self): """Load all Job steps for this collection of Jobs. - This function fills in the `steps` attribute for all Jobs in the + This function fills in the "steps" attribute for all Jobs in the collection. !!! note @@ -207,16 +194,14 @@ cdef class Jobs(list): RPCError: When retrieving the Job information for all the Steps failed. 
""" - cdef dict steps = JobSteps.load().as_dict() + cdef dict step_info = JobSteps.load_all() - for idx, job in enumerate(self): + for job, idx in enumerate(self): # Ignore any Steps from Jobs which do not exist in this # collection. jid = job.id - if jid in steps: - job_steps = self[idx].steps - job_steps.clear() - job_steps.extend(steps[jid].values()) + if jid in step_info: + self[idx].steps = step_info[jid] @property def memory(self): @@ -245,7 +230,6 @@ cdef class Job: self.ptr.job_id = job_id self.passwd = {} self.groups = {} - cstr.fmalloc(&self.ptr.cluster, LOCAL_CLUSTER) self.steps = JobSteps.__new__(JobSteps) def _alloc_impl(self): @@ -308,7 +292,7 @@ cdef class Job: if not slurm.IS_JOB_PENDING(wrap.ptr): # Just ignore if the steps couldn't be loaded here. try: - wrap.steps = JobSteps._load_single(wrap) + wrap.steps = JobSteps._load(wrap) except RPCError: pass else: diff --git a/pyslurm/core/node.pyx b/pyslurm/core/node.pyx index 609016fe..da2c3258 100644 --- a/pyslurm/core/node.pyx +++ b/pyslurm/core/node.pyx @@ -28,7 +28,6 @@ from pyslurm.utils import ctime from pyslurm.utils.uint import * from pyslurm.core.error import RPCError, verify_rpc from pyslurm.utils.ctime import timestamp_to_date, _raw_time -from pyslurm.db.cluster import LOCAL_CLUSTER from pyslurm.utils.helpers import ( uid_to_name, gid_to_name, @@ -59,9 +58,9 @@ cdef class Nodes(list): if isinstance(nodes, list): for node in nodes: if isinstance(node, str): - self.append(Node(node)) + self.extend(Node(node)) else: - self.append(node) + self.extend(node) elif isinstance(nodes, str): nodelist = nodes.split(",") self.extend([Node(node) for node in nodelist]) @@ -70,21 +69,8 @@ cdef class Nodes(list): elif nodes is not None: raise TypeError("Invalid Type: {type(nodes)}") - def as_dict(self, recursive=False): - """Convert the collection data to a dict. - - Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. 
If - this is set to `True`, then additionally all objects are - converted to dicts. - - Returns: - (dict): Collection as a dict. - """ - col = collection_to_dict(self, identifier=Node.name, - recursive=recursive) - return col.get(LOCAL_CLUSTER, {}) + def as_dict(self): + return collection_to_dict(self, False, False, Node.name) def group_by_cluster(self): return group_collection_by_cluster(self) @@ -170,7 +156,7 @@ cdef class Nodes(list): return self reloaded_nodes = Nodes.load().as_dict() - for idx, node in enumerate(self): + for node, idx in enumerate(self): node_name = node.name if node in reloaded_nodes: # Put the new data in. @@ -254,7 +240,6 @@ cdef class Node: def __init__(self, name=None, **kwargs): self._alloc_impl() self.name = name - self.cluster = LOCAL_CLUSTER for k, v in kwargs.items(): setattr(self, k, v) @@ -302,7 +287,6 @@ cdef class Node: wrap._alloc_info() wrap.passwd = {} wrap.groups = {} - wrap.cluster = LOCAL_CLUSTER memcpy(wrap.info, in_ptr, sizeof(node_info_t)) return wrap diff --git a/pyslurm/core/partition.pyx b/pyslurm/core/partition.pyx index 68459c76..0e1fa013 100644 --- a/pyslurm/core/partition.pyx +++ b/pyslurm/core/partition.pyx @@ -57,17 +57,19 @@ cdef class Partitions(list): self.info = NULL def __init__(self, partitions=None): - if isinstance(partitions, dict): - self.update(partitions) - elif isinstance(partitions, str): - partlist = partitions.split(",") - self.update({part: Partition(part) for part in partlist}) - elif partitions is not None: + if isinstance(partitions, list): for part in partitions: if isinstance(part, str): - self[part] = Partition(part) + self.extend(Partition(part)) else: - self[part.name] = part + self.extend(part) + elif isinstance(partitions, str): + partlist = partitions.split(",") + self.extend([Partition(part) for part in partlist]) + elif isinstance(partitions, dict): + self.extend([part for part in partitions.values()]) + elif partitions is not None: + raise TypeError("Invalid Type: 
{type(partitions)}") def as_dict(self): return collection_to_dict(self, False, False, Partition.name) diff --git a/pyslurm/db/job.pyx b/pyslurm/db/job.pyx index f4996b3d..b1df3141 100644 --- a/pyslurm/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -40,6 +40,7 @@ from pyslurm.utils.helpers import ( nodelist_to_range_str, instance_to_dict, collection_to_dict, + group_collection_by_cluster, _get_exit_code, ) from pyslurm.db.connection import _open_conn_or_error @@ -75,14 +76,19 @@ cdef class JobFilter: return None qos_id_list = [] - qos = QualitiesOfService.load() - for q in self.qos: - if isinstance(q, int): - qos_id_list.append(q) - elif q in qos: - qos_id_list.append(str(qos[q].id)) - else: - raise ValueError(f"QoS {q} does not exist") + qos_data = QualitiesOfService.load() + for user_input in self.qos: + found = False + for qos in qos_data: + if (qos.id == user_input + or qos.name == user_input + or qos == user_input): + qos_id_list.append(str(qos.id)) + found = True + break + + if not found: + raise ValueError(f"QoS '{user_input}' does not exist") return qos_id_list @@ -202,7 +208,10 @@ cdef class Jobs(list): raise TypeError("Invalid Type: {type(jobs)}") def as_dict(self, by_cluster=False): - return collection_to_dict(self, by_cluster) + return collection_to_dict(self, by_cluster, False, Job.id) + + def group_by_cluster(self): + return group_collection_by_cluster(self) @staticmethod def load(JobFilter db_filter=None, Connection db_connection=None): @@ -246,7 +255,7 @@ cdef class Jobs(list): SlurmList job_data SlurmListItem job_ptr Connection conn - QualitiesOfService qos_data + dict qos_data # Prepare SQL Filter if not db_filter: @@ -263,8 +272,8 @@ cdef class Jobs(list): # Fetch other necessary dependencies needed for translating some # attributes (i.e QoS IDs to its name) - qos_data = QualitiesOfService.load(name_is_key=False, - db_connection=conn) + qos_data = QualitiesOfService.load(db_connection=conn).as_dict( + name_is_key=False) # TODO: also get trackable 
resources with slurmdb_tres_get and store # it in each job instance. tres_alloc_str and tres_req_str only @@ -360,7 +369,7 @@ cdef class Jobs(list): # Prepare SQL Filter if isinstance(db_filter, Jobs): - job_ids = list(db_filter.keys()) + job_ids = [job.id for job in self] cond = JobFilter(ids=job_ids) else: cond = db_filter From 4f4f18818ee5a3bb0963c18ae0db5503fa04206d Mon Sep 17 00:00:00 2001 From: tazend Date: Fri, 23 Jun 2023 22:31:16 +0200 Subject: [PATCH 10/28] wip --- pyslurm/core/job/job.pyx | 13 ++++++---- pyslurm/core/node.pyx | 14 +++++++---- pyslurm/core/partition.pyx | 17 +++++++------ pyslurm/db/__init__.py | 1 + pyslurm/db/assoc.pyx | 10 ++++++-- pyslurm/db/job.pyx | 34 +++++++++++++++----------- pyslurm/db/qos.pyx | 50 +++++++++++++++++++------------------- pyslurm/db/tres.pxd | 4 +-- pyslurm/db/tres.pyx | 8 +++--- pyslurm/utils/helpers.pyx | 10 +++----- 10 files changed, 90 insertions(+), 71 deletions(-) diff --git a/pyslurm/core/job/job.pyx b/pyslurm/core/job/job.pyx index f42d7f5d..35ff9ec8 100644 --- a/pyslurm/core/job/job.pyx +++ b/pyslurm/core/job/job.pyx @@ -34,6 +34,7 @@ from typing import Union from pyslurm.utils import cstr, ctime from pyslurm.utils.uint import * from pyslurm.core.job.util import * +from pyslurm.db.cluster import LOCAL_CLUSTER from pyslurm.core.error import ( RPCError, verify_rpc, @@ -68,9 +69,9 @@ cdef class Jobs(list): if isinstance(jobs, list): for job in jobs: if isinstance(job, int): - self.extend(Job(job)) + self.append(Job(job)) else: - self.extend(job) + self.append(job) elif isinstance(jobs, str): joblist = jobs.split(",") self.extend([Job(int(job)) for job in joblist]) @@ -79,8 +80,9 @@ cdef class Jobs(list): elif jobs is not None: raise TypeError("Invalid Type: {type(jobs)}") - def as_dict(self): - return collection_to_dict(self, False, False, Job.id) + def as_dict(self, recursive=False): + col = collection_to_dict(self, False, Job.id, recursive) + return col.get(LOCAL_CLUSTER, {}) def group_by_cluster(self): 
return group_collection_by_cluster(self) @@ -165,7 +167,7 @@ cdef class Jobs(list): return self reloaded_jobs = Jobs.load().as_dict() - for jid, idx in enumerate(self): + for idx, jid in enumerate(self): if jid in reloaded_jobs: # Put the new data in. new_jobs.append(reloaded_jobs[jid]) @@ -230,6 +232,7 @@ cdef class Job: self.ptr.job_id = job_id self.passwd = {} self.groups = {} + cstr.fmalloc(&self.ptr.cluster, LOCAL_CLUSTER) self.steps = JobSteps.__new__(JobSteps) def _alloc_impl(self): diff --git a/pyslurm/core/node.pyx b/pyslurm/core/node.pyx index da2c3258..7e262759 100644 --- a/pyslurm/core/node.pyx +++ b/pyslurm/core/node.pyx @@ -28,6 +28,7 @@ from pyslurm.utils import ctime from pyslurm.utils.uint import * from pyslurm.core.error import RPCError, verify_rpc from pyslurm.utils.ctime import timestamp_to_date, _raw_time +from pyslurm.db.cluster import LOCAL_CLUSTER from pyslurm.utils.helpers import ( uid_to_name, gid_to_name, @@ -58,9 +59,9 @@ cdef class Nodes(list): if isinstance(nodes, list): for node in nodes: if isinstance(node, str): - self.extend(Node(node)) + self.append(Node(node)) else: - self.extend(node) + self.append(node) elif isinstance(nodes, str): nodelist = nodes.split(",") self.extend([Node(node) for node in nodelist]) @@ -69,8 +70,9 @@ cdef class Nodes(list): elif nodes is not None: raise TypeError("Invalid Type: {type(nodes)}") - def as_dict(self): - return collection_to_dict(self, False, False, Node.name) + def as_dict(self, recursive=False): + col = collection_to_dict(self, False, Node.name, recursive) + return col.get(LOCAL_CLUSTER, {}) def group_by_cluster(self): return group_collection_by_cluster(self) @@ -156,7 +158,7 @@ cdef class Nodes(list): return self reloaded_nodes = Nodes.load().as_dict() - for node, idx in enumerate(self): + for idx, node in enumerate(self): node_name = node.name if node in reloaded_nodes: # Put the new data in. 
@@ -240,6 +242,7 @@ cdef class Node: def __init__(self, name=None, **kwargs): self._alloc_impl() self.name = name + self.cluster = LOCAL_CLUSTER for k, v in kwargs.items(): setattr(self, k, v) @@ -287,6 +290,7 @@ cdef class Node: wrap._alloc_info() wrap.passwd = {} wrap.groups = {} + wrap.cluster = LOCAL_CLUSTER memcpy(wrap.info, in_ptr, sizeof(node_info_t)) return wrap diff --git a/pyslurm/core/partition.pyx b/pyslurm/core/partition.pyx index 0e1fa013..147e261f 100644 --- a/pyslurm/core/partition.pyx +++ b/pyslurm/core/partition.pyx @@ -30,6 +30,7 @@ from pyslurm.utils.uint import * from pyslurm.core.error import RPCError, verify_rpc from pyslurm.utils.ctime import timestamp_to_date, _raw_time from pyslurm.constants import UNLIMITED +from pyslurm.db.cluster import LOCAL_CLUSTER from pyslurm.utils.helpers import ( uid_to_name, gid_to_name, @@ -60,9 +61,9 @@ cdef class Partitions(list): if isinstance(partitions, list): for part in partitions: if isinstance(part, str): - self.extend(Partition(part)) + self.append(Partition(part)) else: - self.extend(part) + self.append(part) elif isinstance(partitions, str): partlist = partitions.split(",") self.extend([Partition(part) for part in partlist]) @@ -71,8 +72,9 @@ cdef class Partitions(list): elif partitions is not None: raise TypeError("Invalid Type: {type(partitions)}") - def as_dict(self): - return collection_to_dict(self, False, False, Partition.name) + def as_dict(self, recursive=False): + col = collection_to_dict(self, False, Partition.name, recursive) + return col.get(LOCAL_CLUSTER, {}) def group_by_cluster(self): return group_collection_by_cluster(self) @@ -114,7 +116,6 @@ cdef class Partitions(list): partition.power_save_enabled = power_save_enabled partition.slurm_conf = slurm_conf - partition.cluster = slurm_conf.cluster partitions.append(partition) # At this point we memcpy'd all the memory for the Partitions. 
Setting @@ -147,7 +148,7 @@ cdef class Partitions(list): return self reloaded_parts = Partitions.load().as_dict() - for part, idx in enumerate(self): + for idx, part in enumerate(self): part_name = part.name if part_name in reloaded_parts: # Put the new data in. @@ -196,6 +197,7 @@ cdef class Partition: def __init__(self, name=None, **kwargs): self._alloc_impl() self.name = name + self.cluster = LOCAL_CLUSTER for k, v in kwargs.items(): setattr(self, k, v) @@ -218,6 +220,7 @@ cdef class Partition: cdef Partition from_ptr(partition_info_t *in_ptr): cdef Partition wrap = Partition.__new__(Partition) wrap._alloc_impl() + wrap.cluster = LOCAL_CLUSTER memcpy(wrap.ptr, in_ptr, sizeof(partition_info_t)) return wrap @@ -259,7 +262,7 @@ cdef class Partition: >>> import pyslurm >>> part = pyslurm.Partition.load("normal") """ - partitions = Partitions.load() + partitions = Partitions.load().as_dict() if name not in partitions: raise RPCError(msg=f"Partition '{name}' doesn't exist") diff --git a/pyslurm/db/__init__.py b/pyslurm/db/__init__.py index acd36a40..0e78a734 100644 --- a/pyslurm/db/__init__.py +++ b/pyslurm/db/__init__.py @@ -42,3 +42,4 @@ Association, AssociationFilter, ) +from . 
import cluster diff --git a/pyslurm/db/assoc.pyx b/pyslurm/db/assoc.pyx index 8697e4ef..08efea96 100644 --- a/pyslurm/db/assoc.pyx +++ b/pyslurm/db/assoc.pyx @@ -31,6 +31,7 @@ from pyslurm.utils.helpers import ( ) from pyslurm.utils.uint import * from pyslurm.db.connection import _open_conn_or_error +from pyslurm.db.cluster import LOCAL_CLUSTER cdef class Associations(list): @@ -38,8 +39,12 @@ cdef class Associations(list): def __init__(self): pass - def as_dict(self, group_by_cluster=False): - return collection_to_dict(self, group_by_cluster, True, Association.id) + def as_dict(self, recursive=False, group_by_cluster=False): + col = collection_to_dict(self, False, Association.id, recursive) + if not group_by_cluster: + return col.get(LOCAL_CLUSTER, {}) + + return col def group_by_cluster(self): return group_collection_by_cluster(self) @@ -183,6 +188,7 @@ cdef class Association: def __init__(self, **kwargs): self._alloc_impl() self.id = 0 + self.cluster = LOCAL_CLUSTER for k, v in kwargs.items(): setattr(self, k, v) diff --git a/pyslurm/db/job.pyx b/pyslurm/db/job.pyx index b1df3141..f02099cc 100644 --- a/pyslurm/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -27,6 +27,7 @@ from pyslurm.core.error import RPCError, PyslurmError from pyslurm.core import slurmctld from typing import Any from pyslurm.utils.uint import * +from pyslurm.db.cluster import LOCAL_CLUSTER from pyslurm.utils.ctime import ( date_to_timestamp, timestr_to_mins, @@ -104,11 +105,9 @@ cdef class JobFilter: def _parse_clusters(self): if not self.clusters: - # Get the local cluster name # This is a requirement for some other parameters to function # correctly, like self.nodelist - slurm_conf = slurmctld.Config.load() - return [slurm_conf.cluster] + return [LOCAL_CLUSTER] elif self.clusters == "all": return None else: @@ -196,9 +195,9 @@ cdef class Jobs(list): if isinstance(jobs, list): for job in jobs: if isinstance(job, int): - self.extend(Job(job)) + self.append(Job(job)) else: - self.extend(job) + 
self.append(job) elif isinstance(jobs, str): joblist = jobs.split(",") self.extend([Job(job) for job in joblist]) @@ -207,8 +206,12 @@ cdef class Jobs(list): elif jobs is not None: raise TypeError("Invalid Type: {type(jobs)}") - def as_dict(self, by_cluster=False): - return collection_to_dict(self, by_cluster, False, Job.id) + def as_dict(self, recursive=False, group_by_cluster=False): + col = collection_to_dict(self, False, Job.id, recursive) + if not group_by_cluster: + return col.get(LOCAL_CLUSTER, {}) + + return col def group_by_cluster(self): return group_collection_by_cluster(self) @@ -419,9 +422,10 @@ cdef class Job: def __cinit__(self): self.ptr = NULL - def __init__(self, job_id=0, **kwargs): + def __init__(self, job_id=0, cluster=LOCAL_CLUSTER, **kwargs): self._alloc_impl() self.ptr.jobid = int(job_id) + cstr.fmalloc(&self.ptr.cluster, cluster) for k, v in kwargs.items(): setattr(self, k, v) @@ -445,7 +449,7 @@ cdef class Job: return wrap @staticmethod - def load(job_id, with_script=False, with_env=False): + def load(job_id, cluster=LOCAL_CLUSTER, with_script=False, with_env=False): """Load the information for a specific Job from the Database. Args: @@ -472,13 +476,15 @@ cdef class Job: >>> print(db_job.script) """ - jfilter = JobFilter(ids=[int(job_id)], - with_script=with_script, with_env=with_env) + jfilter = JobFilter(ids=[int(job_id)], clusters=[cluster], + with_script=with_script, with_env=with_env) jobs = Jobs.load(jfilter) - if not jobs or job_id not in jobs: - raise RPCError(msg=f"Job {job_id} does not exist") + if not jobs: + raise RPCError(msg=f"Job {job_id} does not exist on " + f"Cluster {cluster}") - return jobs[job_id] + # TODO: There might be multiple entries when job ids were reset. 
+ return jobs[0] def _create_steps(self): cdef: diff --git a/pyslurm/db/qos.pyx b/pyslurm/db/qos.pyx index 303bc30a..027308fc 100644 --- a/pyslurm/db/qos.pyx +++ b/pyslurm/db/qos.pyx @@ -27,38 +27,17 @@ from pyslurm.utils.helpers import instance_to_dict, collection_to_dict from pyslurm.db.connection import _open_conn_or_error -def _qos_names_to_ids(qos_list, QualitiesOfService data): - cdef list out = [] - if not qos_list: - return None - - return [_validate_qos_single(qid, data) for qid in qos_list] - - -def _validate_qos_single(qid, QualitiesOfService data): - for item in data: - if qid == item.id or qid == item.name: - return item.id - - raise ValueError(f"Invalid QOS specified: {qid}") - - -cdef _set_qos_list(List *in_list, vals, QualitiesOfService data): - qos_ids = _qos_names_to_ids(vals, data) - make_char_list(in_list, qos_ids) - - cdef class QualitiesOfService(list): def __init__(self): pass - def as_dict(self, name_is_key=True): + def as_dict(self, recursive=False, name_is_key=True): identifier = QualityOfService.name if not name_is_key: identifier = QualityOfService.id - return collection_to_dict(self, False, True, identifier) + return collection_to_dict(self, True, identifier, recursive) @staticmethod def load(QualityOfServiceFilter db_filter=None, db_connection=None): @@ -200,10 +179,10 @@ cdef class QualityOfService: """ qfilter = QualityOfServiceFilter(names=[name]) qos_data = QualitiesOfService.load(qfilter) - if not qos_data or name not in qos_data: + if not qos_data: raise RPCError(msg=f"QualityOfService {name} does not exist") - return qos_data[name] + return qos_data[0] @property def name(self): @@ -220,3 +199,24 @@ cdef class QualityOfService: @property def id(self): return self.ptr.id + + +def _qos_names_to_ids(qos_list, QualitiesOfService data): + cdef list out = [] + if not qos_list: + return None + + return [_validate_qos_single(qid, data) for qid in qos_list] + + +def _validate_qos_single(qid, QualitiesOfService data): + for item in data: + 
if qid == item.id or qid == item.name: + return item.id + + raise ValueError(f"Invalid QOS specified: {qid}") + + +cdef _set_qos_list(List *in_list, vals, QualitiesOfService data): + qos_ids = _qos_names_to_ids(vals, data) + make_char_list(in_list, qos_ids) diff --git a/pyslurm/db/tres.pxd b/pyslurm/db/tres.pxd index ef1568f6..41ed1b4d 100644 --- a/pyslurm/db/tres.pxd +++ b/pyslurm/db/tres.pxd @@ -42,7 +42,7 @@ from pyslurm.db.connection cimport Connection cdef find_tres_count(char *tres_str, typ, on_noval=*, on_inf=*) cdef find_tres_limit(char *tres_str, typ) cdef merge_tres_str(char **tres_str, typ, val) -cdef _tres_ids_to_names(char *tres_str, TrackableResources tres_data) +cdef _tres_ids_to_names(char *tres_str, dict tres_data) cdef _set_tres_limits(char **dest, TrackableResourceLimits src, TrackableResources tres_data) @@ -62,7 +62,7 @@ cdef class TrackableResourceLimits: license @staticmethod - cdef from_ids(char *tres_id_str, TrackableResources tres_data) + cdef from_ids(char *tres_id_str, dict tres_data) cdef class TrackableResourceFilter: diff --git a/pyslurm/db/tres.pyx b/pyslurm/db/tres.pyx index d9d270b5..644cd8b3 100644 --- a/pyslurm/db/tres.pyx +++ b/pyslurm/db/tres.pyx @@ -25,7 +25,7 @@ from pyslurm.utils.uint import * from pyslurm.constants import UNLIMITED from pyslurm.core.error import RPCError -from pyslurm.utils.helpers import instance_to_dict +from pyslurm.utils.helpers import instance_to_dict, collection_to_dict from pyslurm.utils import cstr from pyslurm.db.connection import _open_conn_or_error import json @@ -56,7 +56,7 @@ cdef class TrackableResourceLimits: setattr(self, k, v) @staticmethod - cdef from_ids(char *tres_id_str, TrackableResources tres_data): + cdef from_ids(char *tres_id_str, dict tres_data): tres_list = _tres_ids_to_names(tres_id_str, tres_data) if not tres_list: return None @@ -144,7 +144,7 @@ cdef class TrackableResources(list): if not name_is_key: identifier = TrackableResource.id - return collection_to_dict(self, False, 
True, identifier) + return collection_to_dict(self, True, identifier) @staticmethod def load(Connection db_connection=None, name_is_key=True): @@ -288,7 +288,7 @@ cdef merge_tres_str(char **tres_str, typ, val): cstr.from_dict(tres_str, current) -cdef _tres_ids_to_names(char *tres_str, TrackableResources tres_data): +cdef _tres_ids_to_names(char *tres_str, dict tres_data): if not tres_str: return None diff --git a/pyslurm/utils/helpers.pyx b/pyslurm/utils/helpers.pyx index dbd81d8e..eeed56a0 100644 --- a/pyslurm/utils/helpers.pyx +++ b/pyslurm/utils/helpers.pyx @@ -341,13 +341,13 @@ def instance_to_dict(inst): return out -def collection_to_dict(collection, by_cluster, is_global_data, identifier): +def collection_to_dict(collection, is_global_data, identifier, recursive=False): cdef dict out = {} if is_global_data: for item in collection: _id = identifier.__get__(item) - out[_id] = item + out[_id] = item if not recursive else item.as_dict() return out for item in collection: @@ -356,11 +356,7 @@ def collection_to_dict(collection, by_cluster, is_global_data, identifier): out[cluster] = {} _id = identifier.__get__(item) - out[cluster][_id] = item - - if not by_cluster: - # TODO: Return only local cluster data - return out + out[cluster][_id] = item if not recursive else item.as_dict() return out From 3ebf31f13729edef4fd3f600bba570b0dbd5f723 Mon Sep 17 00:00:00 2001 From: tazend Date: Sat, 24 Jun 2023 21:46:16 +0200 Subject: [PATCH 11/28] wip --- pyslurm/core/job/job.pyx | 27 ++++++++++++++++++++------- pyslurm/core/job/step.pyx | 30 +++++++++++++++++++++++++++--- pyslurm/core/node.pyx | 14 +++++++++++++- pyslurm/core/partition.pyx | 14 +++++++++++++- pyslurm/db/assoc.pyx | 20 +++++++++++++++++++- pyslurm/db/job.pyx | 19 ++++++++++++++++++- pyslurm/db/qos.pyx | 23 ++++++++++++++++++++--- pyslurm/db/tres.pyx | 27 +++++++++++++++++++++++---- pyslurm/utils/helpers.pyx | 26 ++++++++++++++++++-------- 9 files changed, 171 insertions(+), 29 deletions(-) diff --git 
a/pyslurm/core/job/job.pyx b/pyslurm/core/job/job.pyx index 35ff9ec8..2c33d581 100644 --- a/pyslurm/core/job/job.pyx +++ b/pyslurm/core/job/job.pyx @@ -81,7 +81,18 @@ cdef class Jobs(list): raise TypeError("Invalid Type: {type(jobs)}") def as_dict(self, recursive=False): - col = collection_to_dict(self, False, Job.id, recursive) + """Convert the collection data to a dict. + + Args: + recursive (bool, optional): + By default, the objects will not be converted to a dict. If + this is set to `True`, then additionally all objects are + converted to dicts. + + Returns: + (dict): Collection as a dict. + """ + col = collection_to_dict(self, identifier=Job.id, recursive=recursive) return col.get(LOCAL_CLUSTER, {}) def group_by_cluster(self): @@ -185,7 +196,7 @@ cdef class Jobs(list): def load_steps(self): """Load all Job steps for this collection of Jobs. - This function fills in the "steps" attribute for all Jobs in the + This function fills in the `steps` attribute for all Jobs in the collection. !!! note @@ -196,14 +207,16 @@ cdef class Jobs(list): RPCError: When retrieving the Job information for all the Steps failed. """ - cdef dict step_info = JobSteps.load_all() + cdef dict steps = JobSteps.load().as_dict() - for job, idx in enumerate(self): + for idx, job in enumerate(self): # Ignore any Steps from Jobs which do not exist in this # collection. jid = job.id - if jid in step_info: - self[idx].steps = step_info[jid] + if jid in steps: + job_steps = self[idx].steps + job_steps.clear() + job_steps.extend(steps[jid].values()) @property def memory(self): @@ -295,7 +308,7 @@ cdef class Job: if not slurm.IS_JOB_PENDING(wrap.ptr): # Just ignore if the steps couldn't be loaded here. 
try: - wrap.steps = JobSteps._load(wrap) + wrap.steps = JobSteps._load_single(wrap) except RPCError: pass else: diff --git a/pyslurm/core/job/step.pyx b/pyslurm/core/job/step.pyx index d4038f54..aba430c4 100644 --- a/pyslurm/core/job/step.pyx +++ b/pyslurm/core/job/step.pyx @@ -33,8 +33,6 @@ from pyslurm.utils.helpers import ( uid_to_name, collection_to_dict, group_collection_by_cluster, - humanize_step_id, - dehumanize_step_id, ) from pyslurm.core.job.util import cpu_freq_int_to_str from pyslurm.utils.ctime import ( @@ -76,7 +74,7 @@ cdef class JobSteps(list): recursive=recursive, group_id=JobStep.job_id) col = col.get(LOCAL_CLUSTER, {}) if self._job_id: - return col.get(self._job_id, {}) + return col.get(self._job_id) return col @@ -444,3 +442,29 @@ cdef class JobStep: @property def slurm_protocol_version(self): return u32_parse(self.ptr.start_protocol_ver) + + +def humanize_step_id(sid): + if sid == slurm.SLURM_BATCH_SCRIPT: + return "batch" + elif sid == slurm.SLURM_EXTERN_CONT: + return "extern" + elif sid == slurm.SLURM_INTERACTIVE_STEP: + return "interactive" + elif sid == slurm.SLURM_PENDING_STEP: + return "pending" + else: + return sid + + +def dehumanize_step_id(sid): + if sid == "batch": + return slurm.SLURM_BATCH_SCRIPT + elif sid == "extern": + return slurm.SLURM_EXTERN_CONT + elif sid == "interactive": + return slurm.SLURM_INTERACTIVE_STEP + elif sid == "pending": + return slurm.SLURM_PENDING_STEP + else: + return int(sid) diff --git a/pyslurm/core/node.pyx b/pyslurm/core/node.pyx index 7e262759..609016fe 100644 --- a/pyslurm/core/node.pyx +++ b/pyslurm/core/node.pyx @@ -71,7 +71,19 @@ cdef class Nodes(list): raise TypeError("Invalid Type: {type(nodes)}") def as_dict(self, recursive=False): - col = collection_to_dict(self, False, Node.name, recursive) + """Convert the collection data to a dict. + + Args: + recursive (bool, optional): + By default, the objects will not be converted to a dict. 
If + this is set to `True`, then additionally all objects are + converted to dicts. + + Returns: + (dict): Collection as a dict. + """ + col = collection_to_dict(self, identifier=Node.name, + recursive=recursive) return col.get(LOCAL_CLUSTER, {}) def group_by_cluster(self): diff --git a/pyslurm/core/partition.pyx b/pyslurm/core/partition.pyx index 147e261f..56375d33 100644 --- a/pyslurm/core/partition.pyx +++ b/pyslurm/core/partition.pyx @@ -73,7 +73,19 @@ cdef class Partitions(list): raise TypeError("Invalid Type: {type(partitions)}") def as_dict(self, recursive=False): - col = collection_to_dict(self, False, Partition.name, recursive) + """Convert the collection data to a dict. + + Args: + recursive (bool, optional): + By default, the objects will not be converted to a dict. If + this is set to `True`, then additionally all objects are + converted to dicts. + + Returns: + (dict): Collection as a dict. + """ + col = collection_to_dict(self, identifier=Partition.name, + recursive=recursive) return col.get(LOCAL_CLUSTER, {}) def group_by_cluster(self): diff --git a/pyslurm/db/assoc.pyx b/pyslurm/db/assoc.pyx index 08efea96..d1ac4789 100644 --- a/pyslurm/db/assoc.pyx +++ b/pyslurm/db/assoc.pyx @@ -40,7 +40,25 @@ cdef class Associations(list): pass def as_dict(self, recursive=False, group_by_cluster=False): - col = collection_to_dict(self, False, Association.id, recursive) + """Convert the collection data to a dict. + + Args: + recursive (bool, optional): + By default, the objects will not be converted to a dict. If + this is set to `True`, then additionally all objects are + converted to dicts. + group_by_cluster (bool, optional): + By default, only the Jobs from your local Cluster are + returned. If this is set to `True`, then all the Jobs in the + collection will be grouped by the Cluster - with the name of + the cluster as the key and the value being the collection as + another dict. + + Returns: + (dict): Collection as a dict. 
+ """ + col = collection_to_dict(self, identifier=Association.id, + recursive=recursive) if not group_by_cluster: return col.get(LOCAL_CLUSTER, {}) diff --git a/pyslurm/db/job.pyx b/pyslurm/db/job.pyx index f02099cc..f83552af 100644 --- a/pyslurm/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -207,7 +207,24 @@ cdef class Jobs(list): raise TypeError("Invalid Type: {type(jobs)}") def as_dict(self, recursive=False, group_by_cluster=False): - col = collection_to_dict(self, False, Job.id, recursive) + """Convert the collection data to a dict. + + Args: + recursive (bool, optional): + By default, the objects will not be converted to a dict. If + this is set to `True`, then additionally all objects are + converted to dicts. + group_by_cluster (bool, optional): + By default, only the Jobs from your local Cluster are + returned. If this is set to `True`, then all the Jobs in the + collection will be grouped by the Cluster - with the name of + the cluster as the key and the value being the collection as + another dict. + + Returns: + (dict): Collection as a dict. + """ + col = collection_to_dict(self, identifier=Job.id, recursive=recursive) if not group_by_cluster: return col.get(LOCAL_CLUSTER, {}) diff --git a/pyslurm/db/qos.pyx b/pyslurm/db/qos.pyx index 027308fc..a01ef9b0 100644 --- a/pyslurm/db/qos.pyx +++ b/pyslurm/db/qos.pyx @@ -23,7 +23,7 @@ # cython: language_level=3 from pyslurm.core.error import RPCError -from pyslurm.utils.helpers import instance_to_dict, collection_to_dict +from pyslurm.utils.helpers import instance_to_dict, collection_to_dict_global from pyslurm.db.connection import _open_conn_or_error @@ -33,14 +33,31 @@ cdef class QualitiesOfService(list): pass def as_dict(self, recursive=False, name_is_key=True): + """Convert the collection data to a dict. + + Args: + recursive (bool, optional): + By default, the objects will not be converted to a dict. If + this is set to `True`, then additionally all objects are + converted to dicts. 
+ name_is_key (bool, optional): + By default, the keys in this dict are the names of each QoS. + If this is set to `False`, then the unique ID of the QoS will + be used as dict keys. + + Returns: + (dict): Collection as a dict. + """ identifier = QualityOfService.name if not name_is_key: identifier = QualityOfService.id - return collection_to_dict(self, True, identifier, recursive) + return collection_to_dict_global(self, identifier=identifier, + recursive=recursive) @staticmethod - def load(QualityOfServiceFilter db_filter=None, db_connection=None): + def load(QualityOfServiceFilter db_filter=None, + Connection db_connection=None): cdef: QualitiesOfService out = QualitiesOfService() QualityOfService qos diff --git a/pyslurm/db/tres.pyx b/pyslurm/db/tres.pyx index 644cd8b3..df93dda0 100644 --- a/pyslurm/db/tres.pyx +++ b/pyslurm/db/tres.pyx @@ -25,7 +25,7 @@ from pyslurm.utils.uint import * from pyslurm.constants import UNLIMITED from pyslurm.core.error import RPCError -from pyslurm.utils.helpers import instance_to_dict, collection_to_dict +from pyslurm.utils.helpers import instance_to_dict, collection_to_dict_global from pyslurm.utils import cstr from pyslurm.db.connection import _open_conn_or_error import json @@ -139,15 +139,31 @@ cdef class TrackableResources(list): def __init__(self): pass - def as_dict(self, name_is_key=True): + def as_dict(self, recursive=False, name_is_key=True): + """Convert the collection data to a dict. + + Args: + recursive (bool, optional): + By default, the objects will not be converted to a dict. If + this is set to `True`, then additionally all objects are + converted to dicts. + name_is_key (bool, optional): + By default, the keys in this dict are the names of each TRES. + If this is set to `False`, then the unique ID of the TRES will + be used as dict keys. + + Returns: + (dict): Collection as a dict. 
+ """ identifier = TrackableResource.type_and_name if not name_is_key: identifier = TrackableResource.id - return collection_to_dict(self, True, identifier) + return collection_to_dict_global(self, identifier=identifier, + recursive=recursive) @staticmethod - def load(Connection db_connection=None, name_is_key=True): + def load(Connection db_connection=None): cdef: TrackableResources out = TrackableResources() TrackableResource tres @@ -230,6 +246,9 @@ cdef class TrackableResource: wrap.ptr = in_ptr return wrap + def as_dict(self): + return instance_to_dict(self) + @property def id(self): return self.ptr.id diff --git a/pyslurm/utils/helpers.pyx b/pyslurm/utils/helpers.pyx index eeed56a0..092d2ff9 100644 --- a/pyslurm/utils/helpers.pyx +++ b/pyslurm/utils/helpers.pyx @@ -341,23 +341,33 @@ def instance_to_dict(inst): return out -def collection_to_dict(collection, is_global_data, identifier, recursive=False): +def collection_to_dict(collection, identifier, recursive=False, group_id=None): cdef dict out = {} - if is_global_data: - for item in collection: - _id = identifier.__get__(item) - out[_id] = item if not recursive else item.as_dict() - return out - for item in collection: cluster = item.cluster if cluster not in out: out[cluster] = {} _id = identifier.__get__(item) - out[cluster][_id] = item if not recursive else item.as_dict() + data = item if not recursive else item.as_dict() + + if group_id: + grp_id = group_id.__get__(item) + if grp_id not in out[cluster]: + out[cluster][grp_id] = {} + out[cluster][grp_id].update({_id: data}) + else: + out[cluster][_id] = data + + return out + +def collection_to_dict_global(collection, identifier, recursive=False): + cdef dict out = {} + for item in collection: + _id = identifier.__get__(item) + out[_id] = item if not recursive else item.as_dict() return out From af49453e5857c5c3a4791814a934a3b8575a8822 Mon Sep 17 00:00:00 2001 From: tazend Date: Sun, 25 Jun 2023 17:57:46 +0200 Subject: [PATCH 12/28] wip --- 
pyslurm/core/job/step.pyx | 30 +++--------------------------- pyslurm/utils/helpers.pyx | 26 ++++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 27 deletions(-) diff --git a/pyslurm/core/job/step.pyx b/pyslurm/core/job/step.pyx index aba430c4..d4038f54 100644 --- a/pyslurm/core/job/step.pyx +++ b/pyslurm/core/job/step.pyx @@ -33,6 +33,8 @@ from pyslurm.utils.helpers import ( uid_to_name, collection_to_dict, group_collection_by_cluster, + humanize_step_id, + dehumanize_step_id, ) from pyslurm.core.job.util import cpu_freq_int_to_str from pyslurm.utils.ctime import ( @@ -74,7 +76,7 @@ cdef class JobSteps(list): recursive=recursive, group_id=JobStep.job_id) col = col.get(LOCAL_CLUSTER, {}) if self._job_id: - return col.get(self._job_id) + return col.get(self._job_id, {}) return col @@ -442,29 +444,3 @@ cdef class JobStep: @property def slurm_protocol_version(self): return u32_parse(self.ptr.start_protocol_ver) - - -def humanize_step_id(sid): - if sid == slurm.SLURM_BATCH_SCRIPT: - return "batch" - elif sid == slurm.SLURM_EXTERN_CONT: - return "extern" - elif sid == slurm.SLURM_INTERACTIVE_STEP: - return "interactive" - elif sid == slurm.SLURM_PENDING_STEP: - return "pending" - else: - return sid - - -def dehumanize_step_id(sid): - if sid == "batch": - return slurm.SLURM_BATCH_SCRIPT - elif sid == "extern": - return slurm.SLURM_EXTERN_CONT - elif sid == "interactive": - return slurm.SLURM_INTERACTIVE_STEP - elif sid == "pending": - return slurm.SLURM_PENDING_STEP - else: - return int(sid) diff --git a/pyslurm/utils/helpers.pyx b/pyslurm/utils/helpers.pyx index 092d2ff9..fb1d2201 100644 --- a/pyslurm/utils/helpers.pyx +++ b/pyslurm/utils/helpers.pyx @@ -406,3 +406,29 @@ def _get_exit_code(exit_code): exit_state -= 128 return exit_state, sig + + +def humanize_step_id(sid): + if sid == slurm.SLURM_BATCH_SCRIPT: + return "batch" + elif sid == slurm.SLURM_EXTERN_CONT: + return "extern" + elif sid == slurm.SLURM_INTERACTIVE_STEP: + return "interactive" + elif sid 
== slurm.SLURM_PENDING_STEP: + return "pending" + else: + return sid + + +def dehumanize_step_id(sid): + if sid == "batch": + return slurm.SLURM_BATCH_SCRIPT + elif sid == "extern": + return slurm.SLURM_EXTERN_CONT + elif sid == "interactive": + return slurm.SLURM_INTERACTIVE_STEP + elif sid == "pending": + return slurm.SLURM_PENDING_STEP + else: + return int(sid) From 753c26bfb172c318b774dcccb1f5e5db207f2e71 Mon Sep 17 00:00:00 2001 From: tazend Date: Sun, 25 Jun 2023 18:14:27 +0200 Subject: [PATCH 13/28] wip --- pyslurm/db/job.pyx | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pyslurm/db/job.pyx b/pyslurm/db/job.pyx index f83552af..636e1137 100644 --- a/pyslurm/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -231,6 +231,11 @@ cdef class Jobs(list): return col def group_by_cluster(self): + """Group Jobs by cluster name + + Returns: + (dict[str, Jobs]): Jobs grouped by cluster. + """ return group_collection_by_cluster(self) @staticmethod From 1601128aadc8ba2f27899eacf3e04ddd5e22ce78 Mon Sep 17 00:00:00 2001 From: Toni Harzendorf Date: Wed, 5 Jul 2023 19:16:48 +0200 Subject: [PATCH 14/28] wip --- pyslurm/core/partition.pxd | 3 +- pyslurm/core/partition.pyx | 59 ++++----- pyslurm/utils/collections.pxd | 34 +++++ pyslurm/utils/collections.pyx | 225 ++++++++++++++++++++++++++++++++++ 4 files changed, 288 insertions(+), 33 deletions(-) create mode 100644 pyslurm/utils/collections.pxd create mode 100644 pyslurm/utils/collections.pyx diff --git a/pyslurm/core/partition.pxd b/pyslurm/core/partition.pxd index b10366b8..89de12e8 100644 --- a/pyslurm/core/partition.pxd +++ b/pyslurm/core/partition.pxd @@ -56,9 +56,10 @@ from pyslurm.utils cimport ctime from pyslurm.utils.ctime cimport time_t from pyslurm.utils.uint cimport * from pyslurm.core cimport slurmctld +from pyslurm.utils.collections cimport MultiClusterCollection -cdef class Partitions(list): +cdef class Partitions(MultiClusterCollection): """A collection of [pyslurm.Partition][] objects. 
Args: diff --git a/pyslurm/core/partition.pyx b/pyslurm/core/partition.pyx index 56375d33..6ff1edb7 100644 --- a/pyslurm/core/partition.pyx +++ b/pyslurm/core/partition.pyx @@ -49,7 +49,7 @@ from pyslurm.utils.ctime import ( ) -cdef class Partitions(list): +cdef class Partitions(MultiClusterCollection): def __dealloc__(self): slurm_free_partition_info_msg(self.info) @@ -58,35 +58,26 @@ cdef class Partitions(list): self.info = NULL def __init__(self, partitions=None): - if isinstance(partitions, list): - for part in partitions: - if isinstance(part, str): - self.append(Partition(part)) - else: - self.append(part) - elif isinstance(partitions, str): - partlist = partitions.split(",") - self.extend([Partition(part) for part in partlist]) - elif isinstance(partitions, dict): - self.extend([part for part in partitions.values()]) - elif partitions is not None: - raise TypeError("Invalid Type: {type(partitions)}") - - def as_dict(self, recursive=False): - """Convert the collection data to a dict. - - Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. If - this is set to `True`, then additionally all objects are - converted to dicts. - - Returns: - (dict): Collection as a dict. - """ - col = collection_to_dict(self, identifier=Partition.name, - recursive=recursive) - return col.get(LOCAL_CLUSTER, {}) + super().__init__(data=partitions, + col_type="Partitions", + col_val_type=Partition, + col_key_type=str) + +# def as_dict(self, recursive=False): +# """Convert the collection data to a dict. + +# Args: +# recursive (bool, optional): +# By default, the objects will not be converted to a dict. If +# this is set to `True`, then additionally all objects are +# converted to dicts. + +# Returns: +# (dict): Collection as a dict. 
+# """ +# col = collection_to_dict(self, identifier=Partition.name, +# recursive=recursive) +# return col.get(LOCAL_CLUSTER, {}) def group_by_cluster(self): return group_collection_by_cluster(self) @@ -103,7 +94,7 @@ cdef class Partitions(list): failed. """ cdef: - Partitions partitions = Partitions.__new__(Partitions) + Partitions partitions = Partitions() int flags = slurm.SHOW_ALL Partition partition slurmctld.Config slurm_conf @@ -128,7 +119,7 @@ cdef class Partitions(list): partition.power_save_enabled = power_save_enabled partition.slurm_conf = slurm_conf - partitions.append(partition) + partitions.data[LOCAL_CLUSTER][partition.name] = partition # At this point we memcpy'd all the memory for the Partitions. Setting # this to 0 will prevent the slurm partition free function to @@ -357,6 +348,10 @@ cdef class Partition: def name(self): return cstr.to_unicode(self.ptr.name) + @property + def _id(self): + return self.name + @name.setter def name(self, val): cstr.fmalloc(&self.ptr.name, val) diff --git a/pyslurm/utils/collections.pxd b/pyslurm/utils/collections.pxd new file mode 100644 index 00000000..67b4208f --- /dev/null +++ b/pyslurm/utils/collections.pxd @@ -0,0 +1,34 @@ +######################################################################### +# collections.pxd - pyslurm custom collections +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + + +cdef class MultiClusterCollection: + + cdef readonly dict data + + cdef: + _col_type + _col_key_type + _col_val_type + id_attr diff --git a/pyslurm/utils/collections.pyx b/pyslurm/utils/collections.pyx new file mode 100644 index 00000000..eab891410 --- /dev/null +++ b/pyslurm/utils/collections.pyx @@ -0,0 +1,225 @@ +######################################################################### +# collections.pyx - pyslurm custom collections +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from pyslurm.db.cluster import LOCAL_CLUSTER +import copy + + +class MCValuesView: + + def __init__(self, mcm): + self._mcm = mcm + + def __len__(self): + return sum(len(data) for data in self._mcm.data.values()) + + def __contains__(self, val): + for item in self._mcm: + if item is val or item == val: + return True + return False + + def __iter__(self): + for item in self._mcm: + yield item + + def __repr__(self): + return f'{self.__class__.__name__}({", ".join(map(repr, self))})' + + +class MCItemsView: + + def __init__(self, mcm): + self._mcm = mcm + + def __len__(self): + return len(self._mcm.values()) + + def __contains__(self, item): + cluster = LOCAL_CLUSTER + if len(item) == 3: + cluster, key, val = item + else: + key, val = item + + try: + out = self._mcm.data[cluster][key] + except KeyError: + return False + else: + return out is val or out == val + + def __iter__(self): + for cluster, data in self._mcm.data.items(): + for key in data: + yield (cluster, key, data[key]) + + def __repr__(self): + return f'{self.__class__.__name__}({", ".join(map(repr, MCValuesView(self._mcm)))})' + + +cdef class MultiClusterCollection: + + def __init__(self, data, col_type=None, + col_val_type=None, col_key_type=None, init_data=True): + self.data = data if data else {LOCAL_CLUSTER: {}} + self._col_type = col_type + self._col_key_type = col_key_type + self._col_val_type = col_val_type + if init_data: + self._init_data(data) + + def _init_data(self, data): + if isinstance(data, list): + for item in data: + if isinstance(item, self._col_key_type): + item = self._col_val_type(item) + self.data[LOCAL_CLUSTER].update({item._id: item}) + elif isinstance(data, str): + itemlist = data.split(",") + items = {item:self._col_val_type(item) for item in itemlist} + self.data[LOCAL_CLUSTER].update(items) + #elif isinstance(data, dict): + # self.extend([item for item in data.values()]) + elif data 
is not None: + raise TypeError("Invalid Type: {type(data)}") + + def __getitem__(self, item): + cluster = LOCAL_CLUSTER + key = item + + if isinstance(item, self._col_val_type): + cluster, key = item.cluster, item._id + if isinstance(item, tuple) and len(item) == 2: + cluster, key = item + return self.data[cluster][key] + + def __setitem__(self, where, item): + cluster = LOCAL_CLUSTER + key = where + + if isinstance(where, tuple) and len(where) == 2: + cluster, key = where + self.data[cluster][key] = item + + def __delitem__(self, item): + cluster = LOCAL_CLUSTER + key = item + + if isinstance(item, self._col_val_type): + cluster, key = item.cluster, item._id + if isinstance(item, tuple) and len(item) == 2: + cluster, key = item + + del self.data[cluster][key] + + def __len__(self): + sum(len(data) for data in self.data.values()) + + def __repr__(self): + return f'{self._col_type}([{", ".join(map(repr, self))}])' + + def __contains__(self, item): + if isinstance(item, self._col_val_type): + return self._check_for_value(item._id, item.cluster) + elif isinstance(item, self._col_key_type): + return self._check_for_value(item, LOCAL_CLUSTER) + elif isinstance(item, tuple): + cluster, item = item + return self._check_for_value(item, cluster) + + return False + + def _check_for_value(self, val_id, cluster): + cluster_data = self.data.get(cluster) + if cluster_data and val_id in cluster_data: + return True + return False + +# def __copy__(self): +# return self.copy() + +# def copy(self): +# return MultiClusterMap( +# data=self.data.copy(), +# col_type=self._col_type, +# col_key_type=self._col_key_type, +# ) + + def __iter__(self): + for cluster in self.data.values(): + for item in cluster.values(): + yield item + + def get(self, key, default=None, cluster=None): + cluster = LOCAL_CLUSTER if not cluster else cluster + cluster_data = self.data.get(cluster, {}) + return cluster_data.get(key, default) + + def add(self, item): + if item.cluster not in self.data: + 
self.data[item.cluster] = {} + self.data[item.cluster][item._id] = item + + def remove(self, item): + cluster = LOCAL_CLUSTER + key = item + + if isinstance(item, tuple) and len(item) == 2: + cluster, key = item + del self.data[cluster][key] + elif isinstance(item, self._col_val_type): + if item.cluster in self.data and item._id in self.data[item.cluster]: + del self.data[item.cluster][item._id] + elif isinstance(item, self._col_key_type): + del self.data[cluster][key] + + def as_dict(self, recursive=False): + return self.data + + def items(self): + return MCItemsView(self) + + def values(self): + return self + + def popitem(self): + try: + item = next(iter(self)) + except StopIteration: + raise KeyError from None + + del self.data[item.cluster][item._id] + return item + + def clear(self): + self.data.clear() + + def pop(self, key, default=None, cluster=None): + item = self.get(key, default, cluster) + if not item: + return default + + del self.data[cluster][key] + return item From a4632b7322db380d506fd36f44b3bc1c1cbe79e9 Mon Sep 17 00:00:00 2001 From: tazend Date: Wed, 5 Jul 2023 21:40:54 +0200 Subject: [PATCH 15/28] wip --- pyslurm/utils/collections.pxd | 3 +- pyslurm/utils/collections.pyx | 148 ++++++++++++++++++++++------------ 2 files changed, 98 insertions(+), 53 deletions(-) diff --git a/pyslurm/utils/collections.pxd b/pyslurm/utils/collections.pxd index 67b4208f..5252174d 100644 --- a/pyslurm/utils/collections.pxd +++ b/pyslurm/utils/collections.pxd @@ -25,10 +25,9 @@ cdef class MultiClusterCollection: - cdef readonly dict data + cdef public dict data cdef: _col_type _col_key_type _col_val_type - id_attr diff --git a/pyslurm/utils/collections.pyx b/pyslurm/utils/collections.pyx index eab891410..933a5c95 100644 --- a/pyslurm/utils/collections.pyx +++ b/pyslurm/utils/collections.pyx @@ -23,16 +23,23 @@ # cython: language_level=3 from pyslurm.db.cluster import LOCAL_CLUSTER -import copy -class MCValuesView: +class BaseView: def __init__(self, mcm): self._mcm = 
mcm + self._data = mcm.data def __len__(self): - return sum(len(data) for data in self._mcm.data.values()) + return len(self._mcm) + + def __repr__(self): + data = ", ".join(map(repr, self)) + return f'{self.__class__.__name__}([{data}])' + + +class ValuesView(BaseView): def __contains__(self, val): for item in self._mcm: @@ -44,24 +51,58 @@ class MCValuesView: for item in self._mcm: yield item - def __repr__(self): - return f'{self.__class__.__name__}({", ".join(map(repr, self))})' +class MCKeysView(BaseView): -class MCItemsView: + def __contains__(self, item): + cluster, key, = item + return key in self._data[cluster] - def __init__(self, mcm): - self._mcm = mcm + def __iter__(self): + for cluster, keys in self._data.items(): + for key in keys: + yield (cluster, key) - def __len__(self): - return len(self._mcm.values()) + +class KeysView(BaseView): + + def __contains__(self, item): + return item in self._mcm + + def __iter__(self): + for cluster, keys in self._data.items(): + yield from keys + + def with_cluster(self): + return MCKeysView(self._mcm) + + +class ItemsView(BaseView): def __contains__(self, item): - cluster = LOCAL_CLUSTER - if len(item) == 3: - cluster, key, val = item + key, val = item + cluster = self._mcm._get_cluster() + + try: + out = self._mcm.data[cluster][key] + except KeyError: + return False else: - key, val = item + return out is val or out == val + + def __iter__(self): + for cluster, data in self._mcm.data.items(): + for key in data: + yield (key, data[key]) + + def with_cluster(self): + return MCItemsView(self._mcm) + + +class MCItemsView(BaseView): + + def __contains__(self, item): + cluster, key, val = item try: out = self._mcm.data[cluster][key] @@ -75,9 +116,6 @@ class MCItemsView: for key in data: yield (cluster, key, data[key]) - def __repr__(self): - return f'{self.__class__.__name__}({", ".join(map(repr, MCValuesView(self._mcm)))})' - cdef class MultiClusterCollection: @@ -105,33 +143,26 @@ cdef class MultiClusterCollection: 
elif data is not None: raise TypeError("Invalid Type: {type(data)}") - def __getitem__(self, item): - cluster = LOCAL_CLUSTER + def _get_key_and_cluster(self, item): + cluster = self._get_cluster() key = item if isinstance(item, self._col_val_type): cluster, key = item.cluster, item._id - if isinstance(item, tuple) and len(item) == 2: + elif isinstance(item, tuple) and len(item) == 2: cluster, key = item + return cluster, key + + def __getitem__(self, item): + cluster, key = self._get_key_and_cluster(item) return self.data[cluster][key] def __setitem__(self, where, item): - cluster = LOCAL_CLUSTER - key = where - - if isinstance(where, tuple) and len(where) == 2: - cluster, key = where + cluster, key = self._get_key_and_cluster(where) self.data[cluster][key] = item def __delitem__(self, item): - cluster = LOCAL_CLUSTER - key = item - - if isinstance(item, self._col_val_type): - cluster, key = item.cluster, item._id - if isinstance(item, tuple) and len(item) == 2: - cluster, key = item - + cluster, key = self._get_key_and_cluster(item) del self.data[cluster][key] def __len__(self): @@ -144,7 +175,7 @@ cdef class MultiClusterCollection: if isinstance(item, self._col_val_type): return self._check_for_value(item._id, item.cluster) elif isinstance(item, self._col_key_type): - return self._check_for_value(item, LOCAL_CLUSTER) + return self._check_for_value(item, self._get_cluster()) elif isinstance(item, tuple): cluster, item = item return self._check_for_value(item, cluster) @@ -157,23 +188,31 @@ cdef class MultiClusterCollection: return True return False -# def __copy__(self): -# return self.copy() + def _get_cluster(self): + if LOCAL_CLUSTER in self.data: + return LOCAL_CLUSTER + else: + return next(iter(self.keys())) -# def copy(self): -# return MultiClusterMap( -# data=self.data.copy(), -# col_type=self._col_type, -# col_key_type=self._col_key_type, -# ) + def __copy__(self): + return self.copy() + + def copy(self): + return MultiClusterCollection( + 
data=self.data.copy(), + col_type=self._col_type, + col_key_type=self._col_key_type, + col_val_type=self._col_val_type, + init_data=False, + ) def __iter__(self): for cluster in self.data.values(): for item in cluster.values(): yield item - def get(self, key, default=None, cluster=None): - cluster = LOCAL_CLUSTER if not cluster else cluster + def get(self, key, cluster=None, default=None): + cluster = self._get_cluster() if not cluster else cluster cluster_data = self.data.get(cluster, {}) return cluster_data.get(key, default) @@ -183,23 +222,30 @@ cdef class MultiClusterCollection: self.data[item.cluster][item._id] = item def remove(self, item): - cluster = LOCAL_CLUSTER + cluster = self._get_cluster() key = item - if isinstance(item, tuple) and len(item) == 2: + if isinstance(item, self._col_val_type): + if self._check_for_value(item._id, item.cluster): + cluster = item.cluster + del self.data[item.cluster][item._id] + elif isinstance(item, tuple) and len(item) == 2: cluster, key = item del self.data[cluster][key] - elif isinstance(item, self._col_val_type): - if item.cluster in self.data and item._id in self.data[item.cluster]: - del self.data[item.cluster][item._id] elif isinstance(item, self._col_key_type): del self.data[cluster][key] + if not self.data[cluster]: + del self.data[cluster] + def as_dict(self, recursive=False): return self.data + + def keys(self): + return KeysView(self) def items(self): - return MCItemsView(self) + return ItemsView(self) def values(self): return self @@ -216,7 +262,7 @@ cdef class MultiClusterCollection: def clear(self): self.data.clear() - def pop(self, key, default=None, cluster=None): + def pop(self, key, cluster=None, default=None): item = self.get(key, default, cluster) if not item: return default From 940acd12ef464e179f7c2a019bd1adff264a423b Mon Sep 17 00:00:00 2001 From: tazend Date: Thu, 6 Jul 2023 21:42:32 +0200 Subject: [PATCH 16/28] wip --- pyslurm/core/partition.pyx | 48 ++++++-------------------- 
pyslurm/utils/collections.pyx | 64 +++++++++++++++++++++++++++++++---- 2 files changed, 68 insertions(+), 44 deletions(-) diff --git a/pyslurm/core/partition.pyx b/pyslurm/core/partition.pyx index 6ff1edb7..718f5553 100644 --- a/pyslurm/core/partition.pyx +++ b/pyslurm/core/partition.pyx @@ -31,6 +31,7 @@ from pyslurm.core.error import RPCError, verify_rpc from pyslurm.utils.ctime import timestamp_to_date, _raw_time from pyslurm.constants import UNLIMITED from pyslurm.db.cluster import LOCAL_CLUSTER +from pyslurm.utils import collections from pyslurm.utils.helpers import ( uid_to_name, gid_to_name, @@ -38,8 +39,6 @@ from pyslurm.utils.helpers import ( _getpwall_to_dict, cpubind_to_num, instance_to_dict, - collection_to_dict, - group_collection_by_cluster, _sum_prop, dehumanize, ) @@ -63,25 +62,6 @@ cdef class Partitions(MultiClusterCollection): col_val_type=Partition, col_key_type=str) -# def as_dict(self, recursive=False): -# """Convert the collection data to a dict. - -# Args: -# recursive (bool, optional): -# By default, the objects will not be converted to a dict. If -# this is set to `True`, then additionally all objects are -# converted to dicts. - -# Returns: -# (dict): Collection as a dict. -# """ -# col = collection_to_dict(self, identifier=Partition.name, -# recursive=recursive) -# return col.get(LOCAL_CLUSTER, {}) - - def group_by_cluster(self): - return group_collection_by_cluster(self) - @staticmethod def load(): """Load all Partitions in the system. @@ -117,9 +97,13 @@ cdef class Partitions(MultiClusterCollection): # is raised by replacing it with a zeroed-out partition_info_t. 
partitions.info.partition_array[cnt] = partitions.tmp_info + cluster = partition.cluster + if cluster not in partitions.data: + partitions.data[cluster] = {} + partition.power_save_enabled = power_save_enabled partition.slurm_conf = slurm_conf - partitions.data[LOCAL_CLUSTER][partition.name] = partition + partitions.data[cluster][partition.name] = partition # At this point we memcpy'd all the memory for the Partitions. Setting # this to 0 will prevent the slurm partition free function to @@ -145,19 +129,7 @@ cdef class Partitions(MultiClusterCollection): Raises: RPCError: When getting the Partitions from the slurmctld failed. """ - cdef dict reloaded_parts - - if not self: - return self - - reloaded_parts = Partitions.load().as_dict() - for idx, part in enumerate(self): - part_name = part.name - if part_name in reloaded_parts: - # Put the new data in. - self[idx] = reloaded_parts[part_name] - - return self + return collections.multi_reload(self) def modify(self, changes): """Modify all Partitions in a Collection. 
@@ -180,7 +152,7 @@ cdef class Partitions(MultiClusterCollection): >>> # Apply the changes to all the partitions >>> parts.modify(changes) """ - for part in self: + for part in self.values(): part.modify(changes) @property @@ -219,6 +191,9 @@ cdef class Partition: def __dealloc__(self): self._dealloc_impl() + def __repr__(self): + return f'{self.__class__.__name__}({self.name})' + @staticmethod cdef Partition from_ptr(partition_info_t *in_ptr): cdef Partition wrap = Partition.__new__(Partition) @@ -332,7 +307,6 @@ cdef class Partition: """ cdef delete_part_msg_t del_part_msg memset(&del_part_msg, 0, sizeof(del_part_msg)) - del_part_msg.name = cstr.from_unicode(self._error_or_name()) verify_rpc(slurm_delete_partition(&del_part_msg)) diff --git a/pyslurm/utils/collections.pyx b/pyslurm/utils/collections.pyx index 933a5c95..f758154a 100644 --- a/pyslurm/utils/collections.pyx +++ b/pyslurm/utils/collections.pyx @@ -121,7 +121,7 @@ cdef class MultiClusterCollection: def __init__(self, data, col_type=None, col_val_type=None, col_key_type=None, init_data=True): - self.data = data if data else {LOCAL_CLUSTER: {}} + self.data = data if data else {} self._col_type = col_type self._col_key_type = col_key_type self._col_val_type = col_val_type @@ -133,11 +133,14 @@ cdef class MultiClusterCollection: for item in data: if isinstance(item, self._col_key_type): item = self._col_val_type(item) + if LOCAL_CLUSTER not in self.data: + self.data[LOCAL_CLUSTER] = {} + self.data[LOCAL_CLUSTER].update({item._id: item}) elif isinstance(data, str): itemlist = data.split(",") items = {item:self._col_val_type(item) for item in itemlist} - self.data[LOCAL_CLUSTER].update(items) + self.data[LOCAL_CLUSTER] = items #elif isinstance(data, dict): # self.extend([item for item in data.values()]) elif data is not None: @@ -189,7 +192,7 @@ cdef class MultiClusterCollection: return False def _get_cluster(self): - if LOCAL_CLUSTER in self.data: + if not self.data or LOCAL_CLUSTER in self.data: return 
LOCAL_CLUSTER else: return next(iter(self.keys())) @@ -198,19 +201,24 @@ cdef class MultiClusterCollection: return self.copy() def copy(self): - return MultiClusterCollection( + out = self.__class__.__new__(self.__class__) + super(self.__class__, out).__init__( data=self.data.copy(), col_type=self._col_type, col_key_type=self._col_key_type, col_val_type=self._col_val_type, init_data=False, ) + return out def __iter__(self): for cluster in self.data.values(): for item in cluster.values(): yield item + def __bool__(self): + return bool(self.data) + def get(self, key, cluster=None, default=None): cluster = self._get_cluster() if not cluster else cluster cluster_data = self.data.get(cluster, {}) @@ -238,8 +246,17 @@ cdef class MultiClusterCollection: if not self.data[cluster]: del self.data[cluster] - def as_dict(self, recursive=False): - return self.data + def as_dict(self, recursive=False, multi_cluster=False): + cdef dict out = self.data.get(self._get_cluster(), {}) + + if multi_cluster: + if recursive: + return multi_dict_recursive(self) + return self.data + elif recursive: + return dict_recursive(out) + + return out def keys(self): return KeysView(self) @@ -263,9 +280,42 @@ cdef class MultiClusterCollection: self.data.clear() def pop(self, key, cluster=None, default=None): - item = self.get(key, default, cluster) + item = self.get(key, cluster=cluster, default=default) if not item: return default del self.data[cluster][key] return item + + +def multi_reload(collection): + if not collection: + return collection + + new_data = collection.__class__.load() + for cluster, item in collection.keys().with_cluster(): + if (cluster, item) in new_data.keys().with_cluster(): + collection.data[cluster][item] = new_data.data[cluster][item] + return collection + + +def dict_recursive(collection): + cdef dict out = {} + for item_id, item in collection.items(): + out[item_id] = item.as_dict() + return out + + +def multi_dict_recursive(collection): + cdef dict out = 
collection.data.copy() + for cluster, data in collection.data.items(): + out[cluster] = dict_recursive(data) +# if group_id: +# grp_id = group_id.__get__(item) +# if grp_id not in out[cluster]: +# out[cluster][grp_id] = {} +# out[cluster][grp_id].update({_id: data}) +# else: +# out[cluster][_id] = data + + return out From e45e040970ceeb472c588b1d3c1a96740a4f9514 Mon Sep 17 00:00:00 2001 From: tazend Date: Fri, 7 Jul 2023 21:03:00 +0200 Subject: [PATCH 17/28] wip --- pyslurm/{utils => }/collections.pxd | 9 ++- pyslurm/{utils => }/collections.pyx | 110 ++++++++++++++++++---------- pyslurm/core/job/job.pxd | 6 +- pyslurm/core/job/job.pyx | 70 +++++------------- pyslurm/core/node.pxd | 3 +- pyslurm/core/node.pyx | 80 +++++--------------- pyslurm/core/partition.pxd | 4 +- pyslurm/core/partition.pyx | 16 ++-- 8 files changed, 126 insertions(+), 172 deletions(-) rename pyslurm/{utils => }/collections.pxd (91%) rename pyslurm/{utils => }/collections.pyx (74%) diff --git a/pyslurm/utils/collections.pxd b/pyslurm/collections.pxd similarity index 91% rename from pyslurm/utils/collections.pxd rename to pyslurm/collections.pxd index 5252174d..5c43d932 100644 --- a/pyslurm/utils/collections.pxd +++ b/pyslurm/collections.pxd @@ -23,11 +23,12 @@ # cython: language_level=3 -cdef class MultiClusterCollection: +cdef class MultiClusterMap: cdef public dict data cdef: - _col_type - _col_key_type - _col_val_type + _typ + _key_type + _val_type + _id_attr diff --git a/pyslurm/utils/collections.pyx b/pyslurm/collections.pyx similarity index 74% rename from pyslurm/utils/collections.pyx rename to pyslurm/collections.pyx index f758154a..a439ec81 100644 --- a/pyslurm/utils/collections.pyx +++ b/pyslurm/collections.pyx @@ -117,29 +117,30 @@ class MCItemsView(BaseView): yield (cluster, key, data[key]) -cdef class MultiClusterCollection: +cdef class MultiClusterMap: - def __init__(self, data, col_type=None, - col_val_type=None, col_key_type=None, init_data=True): + def __init__(self, data, 
typ=None, + val_type=None, key_type=None, id_attr=None, init_data=True): self.data = data if data else {} - self._col_type = col_type - self._col_key_type = col_key_type - self._col_val_type = col_val_type + self._typ = typ + self._key_type = key_type + self._val_type = val_type + self._id_attr = id_attr if init_data: self._init_data(data) def _init_data(self, data): if isinstance(data, list): for item in data: - if isinstance(item, self._col_key_type): - item = self._col_val_type(item) + if isinstance(item, self._key_type): + item = self._val_type(item) if LOCAL_CLUSTER not in self.data: self.data[LOCAL_CLUSTER] = {} - self.data[LOCAL_CLUSTER].update({item._id: item}) + self.data[LOCAL_CLUSTER].update({self._item_id(item): item}) elif isinstance(data, str): itemlist = data.split(",") - items = {item:self._col_val_type(item) for item in itemlist} + items = {item:self._val_type(item) for item in itemlist} self.data[LOCAL_CLUSTER] = items #elif isinstance(data, dict): # self.extend([item for item in data.values()]) @@ -150,34 +151,46 @@ cdef class MultiClusterCollection: cluster = self._get_cluster() key = item - if isinstance(item, self._col_val_type): - cluster, key = item.cluster, item._id + if isinstance(item, self._val_type): + cluster, key = item.cluster, self._item_id(item) elif isinstance(item, tuple) and len(item) == 2: cluster, key = item return cluster, key + def _item_id(self, item): + return self._id_attr.__get__(item) + def __getitem__(self, item): + if item in self.data: + return self.data[item] + cluster, key = self._get_key_and_cluster(item) return self.data[cluster][key] def __setitem__(self, where, item): - cluster, key = self._get_key_and_cluster(where) - self.data[cluster][key] = item + if where in self.data: + self.data[where] = item + else: + cluster, key = self._get_key_and_cluster(where) + self.data[cluster][key] = item def __delitem__(self, item): - cluster, key = self._get_key_and_cluster(item) - del self.data[cluster][key] + if item in 
self.data: + del self.data[item] + else: + cluster, key = self._get_key_and_cluster(item) + del self.data[cluster][key] def __len__(self): sum(len(data) for data in self.data.values()) def __repr__(self): - return f'{self._col_type}([{", ".join(map(repr, self))}])' + return f'{self._typ}([{", ".join(map(repr, self))}])' def __contains__(self, item): - if isinstance(item, self._col_val_type): - return self._check_for_value(item._id, item.cluster) - elif isinstance(item, self._col_key_type): + if isinstance(item, self._val_type): + return self._check_for_value(self._item_id(item), item.cluster) + elif isinstance(item, self._key_type): return self._check_for_value(item, self._get_cluster()) elif isinstance(item, tuple): cluster, item = item @@ -204,9 +217,9 @@ cdef class MultiClusterCollection: out = self.__class__.__new__(self.__class__) super(self.__class__, out).__init__( data=self.data.copy(), - col_type=self._col_type, - col_key_type=self._col_key_type, - col_val_type=self._col_val_type, + typ=self._typ, + key_type=self._key_type, + val_type=self._val_type, init_data=False, ) return out @@ -221,26 +234,25 @@ cdef class MultiClusterCollection: def get(self, key, cluster=None, default=None): cluster = self._get_cluster() if not cluster else cluster - cluster_data = self.data.get(cluster, {}) - return cluster_data.get(key, default) + return self.data.get(cluster, {}).get(key, default) def add(self, item): if item.cluster not in self.data: self.data[item.cluster] = {} - self.data[item.cluster][item._id] = item + self.data[item.cluster][self._item_id(item)] = item def remove(self, item): cluster = self._get_cluster() key = item - if isinstance(item, self._col_val_type): - if self._check_for_value(item._id, item.cluster): + if isinstance(item, self._val_type): + if self._check_for_value(self._item_id(item), item.cluster): cluster = item.cluster - del self.data[item.cluster][item._id] + del self.data[item.cluster][self._item_id(item)] elif isinstance(item, tuple) and 
len(item) == 2: cluster, key = item del self.data[cluster][key] - elif isinstance(item, self._col_key_type): + elif isinstance(item, self._key_type): del self.data[cluster][key] if not self.data[cluster]: @@ -273,7 +285,7 @@ cdef class MultiClusterCollection: except StopIteration: raise KeyError from None - del self.data[item.cluster][item._id] + del self.data[item.cluster][self._item_id(item)] return item def clear(self): @@ -288,16 +300,24 @@ cdef class MultiClusterCollection: return item -def multi_reload(collection): - if not collection: - return collection +def multi_reload(cur, frozen=True): + if not cur: + return cur + + new = cur.__class__.load() + for cluster, item in list(cur.keys().with_cluster()): + if (cluster, item) in new.keys().with_cluster(): + cur[cluster][item] = new.pop(item, cluster) + elif not frozen: + del cur[cluster][item] + + if not frozen: + for cluster, item in new.keys().with_cluster(): + if (cluster, item) not in cur.keys().with_cluster(): + cur[cluster][item] = new[cluster][item] + + return cur - new_data = collection.__class__.load() - for cluster, item in collection.keys().with_cluster(): - if (cluster, item) in new_data.keys().with_cluster(): - collection.data[cluster][item] = new_data.data[cluster][item] - return collection - def dict_recursive(collection): cdef dict out = {} @@ -319,3 +339,13 @@ def multi_dict_recursive(collection): # out[cluster][_id] = data return out + + +def sum_property(collection, prop, startval=0): + out = startval + for item in collection.values(): + data = prop.__get__(item) + if data is not None: + out += data + + return out diff --git a/pyslurm/core/job/job.pxd b/pyslurm/core/job/job.pxd index bee4f9ec..29da1ee8 100644 --- a/pyslurm/core/job/job.pxd +++ b/pyslurm/core/job/job.pxd @@ -25,14 +25,12 @@ from pyslurm.utils cimport cstr, ctime from pyslurm.utils.uint cimport * from pyslurm.utils.ctime cimport time_t - from libc.string cimport memcpy, memset from libc.stdint cimport uint8_t, uint16_t, 
uint32_t, uint64_t, int64_t from libc.stdlib cimport free - from pyslurm.core.job.submission cimport JobSubmitDescription from pyslurm.core.job.step cimport JobSteps, JobStep - +from pyslurm.collections cimport MultiClusterMap from pyslurm cimport slurm from pyslurm.slurm cimport ( working_cluster_rec, @@ -67,7 +65,7 @@ from pyslurm.slurm cimport ( ) -cdef class Jobs(list): +cdef class Jobs(MultiClusterMap): """A collection of [pyslurm.Job][] objects. Args: diff --git a/pyslurm/core/job/job.pyx b/pyslurm/core/job/job.pyx index 2c33d581..3639240f 100644 --- a/pyslurm/core/job/job.pyx +++ b/pyslurm/core/job/job.pyx @@ -35,6 +35,7 @@ from pyslurm.utils import cstr, ctime from pyslurm.utils.uint import * from pyslurm.core.job.util import * from pyslurm.db.cluster import LOCAL_CLUSTER +from pyslurm import collections from pyslurm.core.error import ( RPCError, verify_rpc, @@ -48,14 +49,11 @@ from pyslurm.utils.helpers import ( _getgrall_to_dict, _getpwall_to_dict, instance_to_dict, - collection_to_dict, - group_collection_by_cluster, - _sum_prop, _get_exit_code, ) -cdef class Jobs(list): +cdef class Jobs(MultiClusterMap): def __cinit__(self): self.info = NULL @@ -65,20 +63,11 @@ cdef class Jobs(list): def __init__(self, jobs=None, frozen=False): self.frozen = frozen - - if isinstance(jobs, list): - for job in jobs: - if isinstance(job, int): - self.append(Job(job)) - else: - self.append(job) - elif isinstance(jobs, str): - joblist = jobs.split(",") - self.extend([Job(int(job)) for job in joblist]) - elif isinstance(jobs, dict): - self.extend([job for job in jobs.values()]) - elif jobs is not None: - raise TypeError("Invalid Type: {type(jobs)}") + super().__init__(data=jobs, + typ="Jobs", + val_type=Job, + id_attr=Job.id, + key_type=int) def as_dict(self, recursive=False): """Convert the collection data to a dict. @@ -92,11 +81,7 @@ cdef class Jobs(list): Returns: (dict): Collection as a dict. 
""" - col = collection_to_dict(self, identifier=Job.id, recursive=recursive) - return col.get(LOCAL_CLUSTER, {}) - - def group_by_cluster(self): - return group_collection_by_cluster(self) + return super().as_dict(recursive) @staticmethod def load(preload_passwd_info=False, frozen=False): @@ -122,7 +107,7 @@ cdef class Jobs(list): cdef: dict passwd = {} dict groups = {} - Jobs jobs = Jobs.__new__(Jobs) + Jobs jobs = Jobs(frozen=frozen) int flags = slurm.SHOW_ALL | slurm.SHOW_DETAIL Job job @@ -150,7 +135,10 @@ cdef class Jobs(list): job.passwd = passwd job.groups = groups - jobs.append(job) + cluster = job.cluster + if cluster not in jobs.data: + jobs.data[cluster] = {} + jobs[cluster][job.id] = job # At this point we memcpy'd all the memory for the Jobs. Setting this # to 0 will prevent the slurm job free function to deallocate the @@ -169,29 +157,7 @@ cdef class Jobs(list): Raises: RPCError: When getting the Jobs from the slurmctld failed. """ - cdef: - Jobs reloaded_jobs - Jobs new_jobs = Jobs() - dict self_dict - - if not self: - return self - - reloaded_jobs = Jobs.load().as_dict() - for idx, jid in enumerate(self): - if jid in reloaded_jobs: - # Put the new data in. - new_jobs.append(reloaded_jobs[jid]) - - if not self.frozen: - self_dict = self.as_dict() - for jid in reloaded_jobs: - if jid not in self_dict: - new_jobs.append(reloaded_jobs[jid]) - - self.clear() - self.extend(new_jobs) - return self + return collections.multi_reload(self, frozen=self.frozen) def load_steps(self): """Load all Job steps for this collection of Jobs. 
@@ -220,19 +186,19 @@ cdef class Jobs(list): @property def memory(self): - return _sum_prop(self, Job.memory) + return collections.sum_property(self, Job.memory) @property def cpus(self): - return _sum_prop(self, Job.cpus) + return collections.sum_property(self, Job.cpus) @property def ntasks(self): - return _sum_prop(self, Job.ntasks) + return collections.sum_property(self, Job.ntasks) @property def cpu_time(self): - return _sum_prop(self, Job.cpu_time) + return collections.sum_property(self, Job.cpu_time) cdef class Job: diff --git a/pyslurm/core/node.pxd b/pyslurm/core/node.pxd index ea59e6ff..d769b614 100644 --- a/pyslurm/core/node.pxd +++ b/pyslurm/core/node.pxd @@ -55,9 +55,10 @@ from pyslurm.utils cimport cstr from pyslurm.utils cimport ctime from pyslurm.utils.ctime cimport time_t from pyslurm.utils.uint cimport * +from pyslurm.collections cimport MultiClusterMap -cdef class Nodes(list): +cdef class Nodes(MultiClusterMap): """A collection of [pyslurm.Node][] objects. Args: diff --git a/pyslurm/core/node.pyx b/pyslurm/core/node.pyx index 609016fe..dc44e99f 100644 --- a/pyslurm/core/node.pyx +++ b/pyslurm/core/node.pyx @@ -29,6 +29,7 @@ from pyslurm.utils.uint import * from pyslurm.core.error import RPCError, verify_rpc from pyslurm.utils.ctime import timestamp_to_date, _raw_time from pyslurm.db.cluster import LOCAL_CLUSTER +from pyslurm import collections from pyslurm.utils.helpers import ( uid_to_name, gid_to_name, @@ -39,13 +40,12 @@ from pyslurm.utils.helpers import ( instance_to_dict, collection_to_dict, group_collection_by_cluster, - _sum_prop, nodelist_from_range_str, nodelist_to_range_str, ) -cdef class Nodes(list): +cdef class Nodes(MultiClusterMap): def __dealloc__(self): slurm_free_node_info_msg(self.info) @@ -56,38 +56,11 @@ cdef class Nodes(list): self.part_info = NULL def __init__(self, nodes=None): - if isinstance(nodes, list): - for node in nodes: - if isinstance(node, str): - self.append(Node(node)) - else: - self.append(node) - elif 
isinstance(nodes, str): - nodelist = nodes.split(",") - self.extend([Node(node) for node in nodelist]) - elif isinstance(nodes, dict): - self.extend([node for node in nodes.values()]) - elif nodes is not None: - raise TypeError("Invalid Type: {type(nodes)}") - - def as_dict(self, recursive=False): - """Convert the collection data to a dict. - - Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. If - this is set to `True`, then additionally all objects are - converted to dicts. - - Returns: - (dict): Collection as a dict. - """ - col = collection_to_dict(self, identifier=Node.name, - recursive=recursive) - return col.get(LOCAL_CLUSTER, {}) - - def group_by_cluster(self): - return group_collection_by_cluster(self) + super().__init__(data=nodes, + typ="Nodes", + val_type=Node, + id_attr=Node.name, + key_type=str) @staticmethod def load(preload_passwd_info=False): @@ -164,19 +137,7 @@ cdef class Nodes(list): Raises: RPCError: When getting the Nodes from the slurmctld failed. """ - cdef Nodes reloaded_nodes - - if not self: - return self - - reloaded_nodes = Nodes.load().as_dict() - for idx, node in enumerate(self): - node_name = node.name - if node in reloaded_nodes: - # Put the new data in. - self[idx] = reloaded_nodes[node_name] - - return self + return collections.multi_reload(self) def modify(self, Node changes): """Modify all Nodes in a collection. 
@@ -199,50 +160,47 @@ cdef class Nodes(list): >>> # Apply the changes to all the nodes >>> nodes.modify(changes) """ - cdef: - Node n = changes - list node_names = [node.name for node in self] - - node_str = nodelist_to_range_str(node_names) + cdef Node n = changes + node_str = nodelist_to_range_str(list(self.keys())) n._alloc_umsg() cstr.fmalloc(&n.umsg.node_names, node_str) verify_rpc(slurm_update_node(n.umsg)) @property def free_memory(self): - return _sum_prop(self, Node.free_memory) + return collections.sum_property(self, Node.free_memory) @property def real_memory(self): - return _sum_prop(self, Node.real_memory) + return collections.sum_property(self, Node.real_memory) @property def allocated_memory(self): - return _sum_prop(self, Node.allocated_memory) + return collections.sum_property(self, Node.allocated_memory) @property def total_cpus(self): - return _sum_prop(self, Node.total_cpus) + return collections.sum_property(self, Node.total_cpus) @property def idle_cpus(self): - return _sum_prop(self, Node.idle_cpus) + return collections.sum_property(self, Node.idle_cpus) @property def allocated_cpus(self): - return _sum_prop(self, Node.allocated_cpus) + return collections.sum_property(self, Node.allocated_cpus) @property def effective_cpus(self): - return _sum_prop(self, Node.effective_cpus) + return collections.sum_property(self, Node.effective_cpus) @property def current_watts(self): - return _sum_prop(self, Node.current_watts) + return collections.sum_property(self, Node.current_watts) @property def avg_watts(self): - return _sum_prop(self, Node.avg_watts) + return collections.sum_property(self, Node.avg_watts) cdef class Node: diff --git a/pyslurm/core/partition.pxd b/pyslurm/core/partition.pxd index 89de12e8..6e95de85 100644 --- a/pyslurm/core/partition.pxd +++ b/pyslurm/core/partition.pxd @@ -56,10 +56,10 @@ from pyslurm.utils cimport ctime from pyslurm.utils.ctime cimport time_t from pyslurm.utils.uint cimport * from pyslurm.core cimport slurmctld -from 
pyslurm.utils.collections cimport MultiClusterCollection +from pyslurm.collections cimport MultiClusterMap -cdef class Partitions(MultiClusterCollection): +cdef class Partitions(MultiClusterMap): """A collection of [pyslurm.Partition][] objects. Args: diff --git a/pyslurm/core/partition.pyx b/pyslurm/core/partition.pyx index 718f5553..28a7c13c 100644 --- a/pyslurm/core/partition.pyx +++ b/pyslurm/core/partition.pyx @@ -31,7 +31,7 @@ from pyslurm.core.error import RPCError, verify_rpc from pyslurm.utils.ctime import timestamp_to_date, _raw_time from pyslurm.constants import UNLIMITED from pyslurm.db.cluster import LOCAL_CLUSTER -from pyslurm.utils import collections +from pyslurm import collections from pyslurm.utils.helpers import ( uid_to_name, gid_to_name, @@ -39,7 +39,6 @@ from pyslurm.utils.helpers import ( _getpwall_to_dict, cpubind_to_num, instance_to_dict, - _sum_prop, dehumanize, ) from pyslurm.utils.ctime import ( @@ -48,7 +47,7 @@ from pyslurm.utils.ctime import ( ) -cdef class Partitions(MultiClusterCollection): +cdef class Partitions(MultiClusterMap): def __dealloc__(self): slurm_free_partition_info_msg(self.info) @@ -58,9 +57,10 @@ cdef class Partitions(MultiClusterCollection): def __init__(self, partitions=None): super().__init__(data=partitions, - col_type="Partitions", - col_val_type=Partition, - col_key_type=str) + typ="Partitions", + val_type=Partition, + id_attr=Partition.name, + key_type=str) @staticmethod def load(): @@ -157,11 +157,11 @@ cdef class Partitions(MultiClusterCollection): @property def total_cpus(self): - return _sum_prop(self, Partition.total_cpus) + return collections.sum_property(self, Partition.total_cpus) @property def total_nodes(self): - return _sum_prop(self, Partition.total_nodes) + return collections.sum_property(self, Partition.total_nodes) cdef class Partition: From 86e490f1cdb95b3d27d1558395ba8d53018836d7 Mon Sep 17 00:00:00 2001 From: Toni Harzendorf Date: Sat, 8 Jul 2023 19:10:39 +0200 Subject: [PATCH 18/28] wip --- 
pyslurm/collections.pyx | 50 ++++++++++++++++++++++--- pyslurm/core/job/job.pyx | 19 ++-------- pyslurm/core/job/step.pxd | 10 +---- pyslurm/core/job/step.pyx | 76 ++++++++++++++++++-------------------- pyslurm/core/node.pyx | 27 ++++++++++---- pyslurm/core/partition.pyx | 22 +++++++---- pyslurm/db/assoc.pxd | 7 ++-- pyslurm/db/assoc.pyx | 51 ++++++++++++------------- pyslurm/db/job.pxd | 5 ++- pyslurm/db/job.pyx | 67 ++++++++++++--------------------- pyslurm/db/qos.pxd | 2 +- pyslurm/db/qos.pyx | 30 ++++++++------- pyslurm/db/tres.pxd | 4 +- pyslurm/db/tres.pyx | 32 ++++++++-------- tests/unit/test_db_job.py | 3 -- 15 files changed, 208 insertions(+), 197 deletions(-) diff --git a/pyslurm/collections.pyx b/pyslurm/collections.pyx index a439ec81..5681a849 100644 --- a/pyslurm/collections.pyx +++ b/pyslurm/collections.pyx @@ -121,7 +121,7 @@ cdef class MultiClusterMap: def __init__(self, data, typ=None, val_type=None, key_type=None, id_attr=None, init_data=True): - self.data = data if data else {} + self.data = {} if init_data else data self._typ = typ self._key_type = key_type self._val_type = val_type @@ -140,12 +140,13 @@ cdef class MultiClusterMap: self.data[LOCAL_CLUSTER].update({self._item_id(item): item}) elif isinstance(data, str): itemlist = data.split(",") - items = {item:self._val_type(item) for item in itemlist} + items = {self._key_type(item):self._val_type(item) + for item in itemlist} self.data[LOCAL_CLUSTER] = items - #elif isinstance(data, dict): - # self.extend([item for item in data.values()]) + elif isinstance(data, dict): + self.update(data) elif data is not None: - raise TypeError("Invalid Type: {type(data)}") + raise TypeError(f"Invalid Type: {type(data)}") def _get_key_and_cluster(self, item): cluster = self._get_cluster() @@ -182,7 +183,7 @@ cdef class MultiClusterMap: del self.data[cluster][key] def __len__(self): - sum(len(data) for data in self.data.values()) + return sum(len(data) for data in self.data.values()) def __repr__(self): 
return f'{self._typ}([{", ".join(map(repr, self))}])' @@ -299,6 +300,43 @@ cdef class MultiClusterMap: del self.data[cluster][key] return item + def _check_val_type(self, item): + if not isinstance(item, self._val_type): + raise TypeError(f"Invalid Type: {type(item).__name__}. " + f"{self._val_type}.__name__ is required.") + + + def _update(self, data, clus): + for key in data: + try: + iterator = iter(data[key]) + except TypeError as e: + cluster = self._get_cluster() if not clus else clus + if not cluster in self.data: + self.data[cluster] = {} + self.data[cluster].update(data) + break + else: + cluster = key + if not cluster in self.data: + self.data[cluster] = {} + self.data[cluster].update(data[cluster]) +# col = data[cluster] +# if hasattr(col, "keys") and callable(col.keys): +# for k in col.keys(): + +# else: +# for item in col: +# k, v = item + + + def update(self, data=None, cluster=None, **kwargs): + if data: + self._update(data, cluster) + + if kwargs: + self._update(kwargs, cluster) + def multi_reload(cur, frozen=True): if not cur: diff --git a/pyslurm/core/job/job.pyx b/pyslurm/core/job/job.pyx index 3639240f..d7965481 100644 --- a/pyslurm/core/job/job.pyx +++ b/pyslurm/core/job/job.pyx @@ -140,14 +140,8 @@ cdef class Jobs(MultiClusterMap): jobs.data[cluster] = {} jobs[cluster][job.id] = job - # At this point we memcpy'd all the memory for the Jobs. Setting this - # to 0 will prevent the slurm job free function to deallocate the - # memory for the individual jobs. This should be fine, because they - # are free'd automatically in __dealloc__ since the lifetime of each - # job-pointer is tied to the lifetime of its corresponding "Job" - # instance. + # We have extracted all pointers jobs.info.record_count = 0 - jobs.frozen = frozen return jobs @@ -173,16 +167,11 @@ cdef class Jobs(MultiClusterMap): RPCError: When retrieving the Job information for all the Steps failed. 
""" - cdef dict steps = JobSteps.load().as_dict() - - for idx, job in enumerate(self): - # Ignore any Steps from Jobs which do not exist in this - # collection. + cdef dict steps = JobSteps.load_all() + for job in self.values(): jid = job.id if jid in steps: - job_steps = self[idx].steps - job_steps.clear() - job_steps.extend(steps[jid].values()) + job.steps = steps[jid] @property def memory(self): diff --git a/pyslurm/core/job/step.pxd b/pyslurm/core/job/step.pxd index 458ee506..ae2d9c48 100644 --- a/pyslurm/core/job/step.pxd +++ b/pyslurm/core/job/step.pxd @@ -49,16 +49,11 @@ from pyslurm.utils.ctime cimport time_t from pyslurm.core.job.task_dist cimport TaskDistribution -cdef class JobSteps(list): +cdef class JobSteps(dict): """A collection of [pyslurm.JobStep][] objects for a given Job. - Args: - job (Union[Job, int]): - A Job for which the Steps should be loaded. - Raises: RPCError: When getting the Job steps from the slurmctld failed. - MemoryError: If malloc fails to allocate memory. 
""" cdef: @@ -68,8 +63,7 @@ cdef class JobSteps(list): @staticmethod cdef JobSteps _load_single(Job job) - - cdef _load_data(self, uint32_t job_id, int flags) + cdef dict _load_data(self, uint32_t job_id, int flags) cdef class JobStep: diff --git a/pyslurm/core/job/step.pyx b/pyslurm/core/job/step.pyx index d4038f54..fbd6ada6 100644 --- a/pyslurm/core/job/step.pyx +++ b/pyslurm/core/job/step.pyx @@ -27,12 +27,11 @@ from pyslurm.utils import cstr, ctime from pyslurm.utils.uint import * from pyslurm.core.error import RPCError, verify_rpc from pyslurm.db.cluster import LOCAL_CLUSTER +from pyslurm import collections from pyslurm.utils.helpers import ( signal_to_num, instance_to_dict, uid_to_name, - collection_to_dict, - group_collection_by_cluster, humanize_step_id, dehumanize_step_id, ) @@ -46,7 +45,7 @@ from pyslurm.utils.ctime import ( ) -cdef class JobSteps(list): +cdef class JobSteps(dict): def __dealloc__(self): slurm_free_job_step_info_response_msg(self.info) @@ -55,8 +54,8 @@ cdef class JobSteps(list): self.info = NULL def __init__(self, steps=None): - if isinstance(steps, list): - self.extend(steps) + if isinstance(steps, dict): + self.update(steps) elif steps is not None: raise TypeError("Invalid Type: {type(steps)}") @@ -72,56 +71,45 @@ cdef class JobSteps(list): Returns: (dict): Collection as a dict. """ - col = collection_to_dict(self, identifier=JobStep.id, - recursive=recursive, group_id=JobStep.job_id) - col = col.get(LOCAL_CLUSTER, {}) - if self._job_id: - return col.get(self._job_id, {}) - - return col - - def group_by_cluster(self): - return group_collection_by_cluster(self) + return self if not recursive else collections.dict_recursive(self) @staticmethod - def load(job_id=0): + def load(job): """Load the Job Steps from the system. Args: - job_id (Union[Job, int]): + job (Union[Job, int]): The Job for which the Steps should be loaded. 
Returns: (pyslurm.JobSteps): JobSteps of the Job """ cdef: - Job job + Job _job JobSteps steps - if job_id: - job = Job.load(job_id.id if isinstance(job_id, Job) else job_id) - steps = JobSteps._load_single(job) - steps._job_id = job.id - return steps - else: - steps = JobSteps() - return steps._load_data(0, slurm.SHOW_ALL) + _job = Job.load(job.id if isinstance(job, Job) else job) + steps = JobSteps._load_single(_job) + steps._job_id = _job.id + return steps @staticmethod cdef JobSteps _load_single(Job job): cdef JobSteps steps = JobSteps() - steps._load_data(job.id, slurm.SHOW_ALL) - if not steps and not slurm.IS_JOB_PENDING(job.ptr): + data = steps._load_data(job.id, slurm.SHOW_ALL) + if not data and not slurm.IS_JOB_PENDING(job.ptr): msg = f"Failed to load step info for Job {job.id}." raise RPCError(msg=msg) + steps.update(data[job.id]) return steps - - cdef _load_data(self, uint32_t job_id, int flags): + + cdef dict _load_data(self, uint32_t job_id, int flags): cdef: JobStep step uint32_t cnt = 0 + dict steps = {} rc = slurm_get_job_steps(0, job_id, slurm.NO_VAL, &self.info, flags) @@ -133,21 +121,29 @@ cdef class JobSteps(list): # Put each job-step pointer into its own "JobStep" instance. for cnt in range(self.info.job_step_count): step = JobStep.from_ptr(&self.info.job_steps[cnt]) - # Prevent double free if xmalloc fails mid-loop and a MemoryError # is raised by replacing it with a zeroed-out job_step_info_t. self.info.job_steps[cnt] = self.tmp_info - self.append(step) - - # At this point we memcpy'd all the memory for the Steps. Setting this - # to 0 will prevent the slurm step free function to deallocate the - # memory for the individual steps. This should be fine, because they - # are free'd automatically in __dealloc__ since the lifetime of each - # step-pointer is tied to the lifetime of its corresponding JobStep - # instance. 
+ + job_id = step.job_id + if not job_id in steps: + steps[job_id] = JobSteps() + steps[job_id][step.id] = step + + # We have extracted all pointers self.info.job_step_count = 0 + return steps + + @staticmethod + def load_all(): + """Loads all the steps in the system. - return self + Returns: + (dict): A dict where every JobID (key) is mapped with an instance + of its JobSteps (value). + """ + cdef JobSteps steps = JobSteps() + return steps._load_data(slurm.NO_VAL, slurm.SHOW_ALL) cdef class JobStep: diff --git a/pyslurm/core/node.pyx b/pyslurm/core/node.pyx index dc44e99f..9ab348f6 100644 --- a/pyslurm/core/node.pyx +++ b/pyslurm/core/node.pyx @@ -62,6 +62,20 @@ cdef class Nodes(MultiClusterMap): id_attr=Node.name, key_type=str) + def as_dict(self, recursive=False): + """Convert the collection data to a dict. + + Args: + recursive (bool, optional): + By default, the objects will not be converted to a dict. If + this is set to `True`, then additionally all objects are + converted to dicts. + + Returns: + (dict): Collection as a dict. + """ + return super().as_dict(recursive) + @staticmethod def load(preload_passwd_info=False): """Load all nodes in the system. @@ -114,16 +128,13 @@ cdef class Nodes(MultiClusterMap): node.passwd = passwd node.groups = groups - nodes.append(node) + cluster = node.cluster + if cluster not in nodes.data: + nodes.data[cluster] = {} + nodes.data[cluster][node.name] = node - # At this point we memcpy'd all the memory for the Nodes. Setting this - # to 0 will prevent the slurm node free function to deallocate the - # memory for the individual nodes. This should be fine, because they - # are free'd automatically in __dealloc__ since the lifetime of each - # node-pointer is tied to the lifetime of its corresponding "Node" - # instance. 
+ # We have extracted all pointers nodes.info.record_count = 0 - return nodes def reload(self): diff --git a/pyslurm/core/partition.pyx b/pyslurm/core/partition.pyx index 28a7c13c..97b11a7b 100644 --- a/pyslurm/core/partition.pyx +++ b/pyslurm/core/partition.pyx @@ -62,6 +62,20 @@ cdef class Partitions(MultiClusterMap): id_attr=Partition.name, key_type=str) + def as_dict(self, recursive=False): + """Convert the collection data to a dict. + + Args: + recursive (bool, optional): + By default, the objects will not be converted to a dict. If + this is set to `True`, then additionally all objects are + converted to dicts. + + Returns: + (dict): Collection as a dict. + """ + return super().as_dict(recursive) + @staticmethod def load(): """Load all Partitions in the system. @@ -105,14 +119,8 @@ cdef class Partitions(MultiClusterMap): partition.slurm_conf = slurm_conf partitions.data[cluster][partition.name] = partition - # At this point we memcpy'd all the memory for the Partitions. Setting - # this to 0 will prevent the slurm partition free function to - # deallocate the memory for the individual partitions. This should be - # fine, because they are free'd automatically in __dealloc__ since the - # lifetime of each partition-pointer is tied to the lifetime of its - # corresponding "Partition" instance. 
+ # We have extracted all pointers partitions.info.record_count = 0 - return partitions def reload(self): diff --git a/pyslurm/db/assoc.pxd b/pyslurm/db/assoc.pxd index 12a0cde1..912f0d6e 100644 --- a/pyslurm/db/assoc.pxd +++ b/pyslurm/db/assoc.pxd @@ -49,12 +49,13 @@ from pyslurm.db.connection cimport Connection from pyslurm.utils cimport cstr from pyslurm.utils.uint cimport * from pyslurm.db.qos cimport QualitiesOfService, _set_qos_list +from pyslurm.collections cimport MultiClusterMap cdef _parse_assoc_ptr(Association ass) cdef _create_assoc_ptr(Association ass, conn=*) -cdef class Associations(list): +cdef class Associations(MultiClusterMap): pass @@ -69,8 +70,8 @@ cdef class AssociationFilter: cdef class Association: cdef: slurmdb_assoc_rec_t *ptr - dict qos_data - dict tres_data + QualitiesOfService qos_data + TrackableResources tres_data cdef public: group_tres diff --git a/pyslurm/db/assoc.pyx b/pyslurm/db/assoc.pyx index d1ac4789..820fe998 100644 --- a/pyslurm/db/assoc.pyx +++ b/pyslurm/db/assoc.pyx @@ -32,14 +32,19 @@ from pyslurm.utils.helpers import ( from pyslurm.utils.uint import * from pyslurm.db.connection import _open_conn_or_error from pyslurm.db.cluster import LOCAL_CLUSTER +from pyslurm import collections -cdef class Associations(list): +cdef class Associations(MultiClusterMap): - def __init__(self): - pass + def __init__(self, assocs=None): + super().__init__(data=assocs, + typ="Associations", + val_type=Association, + id_attr=Association.id, + key_type=int) - def as_dict(self, recursive=False, group_by_cluster=False): + def as_dict(self, recursive=False): """Convert the collection data to a dict. Args: @@ -47,25 +52,11 @@ cdef class Associations(list): By default, the objects will not be converted to a dict. If this is set to `True`, then additionally all objects are converted to dicts. - group_by_cluster (bool, optional): - By default, only the Jobs from your local Cluster are - returned. 
If this is set to `True`, then all the Jobs in the - collection will be grouped by the Cluster - with the name of - the cluster as the key and the value being the collection as - another dict. Returns: (dict): Collection as a dict. """ - col = collection_to_dict(self, identifier=Association.id, - recursive=recursive) - if not group_by_cluster: - return col.get(LOCAL_CLUSTER, {}) - - return col - - def group_by_cluster(self): - return group_collection_by_cluster(self) + return super().as_dict(recursive) @staticmethod def load(AssociationFilter db_filter=None, Connection db_connection=None): @@ -76,8 +67,8 @@ cdef class Associations(list): SlurmList assoc_data SlurmListItem assoc_ptr Connection conn - dict qos_data - dict tres_data + QualitiesOfService qos_data + TrackableResources tres_data # Prepare SQL Filter if not db_filter: @@ -96,10 +87,10 @@ cdef class Associations(list): # Fetch other necessary dependencies needed for translating some # attributes (i.e QoS IDs to its name) - qos_data = QualitiesOfService.load(db_connection=conn).as_dict( - name_is_key=False) - tres_data = TrackableResources.load(db_connection=conn).as_dict( - name_is_key=False) + qos_data = QualitiesOfService.load(db_connection=conn, + name_is_key=False) + tres_data = TrackableResources.load(db_connection=conn, + name_is_key=False) # Setup Association objects for assoc_ptr in SlurmList.iter_and_pop(assoc_data): @@ -107,7 +98,11 @@ cdef class Associations(list): assoc.qos_data = qos_data assoc.tres_data = tres_data _parse_assoc_ptr(assoc) - out.append(assoc) + + cluster = assoc.cluster + if cluster not in out.data: + out.data[cluster] = {} + out.data[cluster][assoc.id] = assoc return out @@ -408,8 +403,8 @@ cdef class Association: cdef _parse_assoc_ptr(Association ass): cdef: - dict tres = ass.tres_data - dict qos = ass.qos_data + TrackableResources tres = ass.tres_data + QualitiesOfService qos = ass.qos_data ass.group_tres = TrackableResourceLimits.from_ids( ass.ptr.grp_tres, tres) diff 
--git a/pyslurm/db/job.pxd b/pyslurm/db/job.pxd index fc395943..ed34dab4 100644 --- a/pyslurm/db/job.pxd +++ b/pyslurm/db/job.pxd @@ -53,6 +53,7 @@ from pyslurm.db.connection cimport Connection from pyslurm.utils cimport cstr from pyslurm.db.qos cimport QualitiesOfService from pyslurm.db.tres cimport TrackableResources, TrackableResource +from pyslurm.collections cimport MultiClusterMap cdef class JobFilter: @@ -150,7 +151,7 @@ cdef class JobFilter: with_env -cdef class Jobs(list): +cdef class Jobs(MultiClusterMap): """A collection of [pyslurm.db.Job][] objects.""" pass @@ -283,7 +284,7 @@ cdef class Job: """ cdef: slurmdb_job_rec_t *ptr - dict qos_data + QualitiesOfService qos_data cdef public: JobSteps steps diff --git a/pyslurm/db/job.pyx b/pyslurm/db/job.pyx index 636e1137..923cba1a 100644 --- a/pyslurm/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -28,6 +28,7 @@ from pyslurm.core import slurmctld from typing import Any from pyslurm.utils.uint import * from pyslurm.db.cluster import LOCAL_CLUSTER +from pyslurm import collections from pyslurm.utils.ctime import ( date_to_timestamp, timestr_to_mins, @@ -80,7 +81,7 @@ cdef class JobFilter: qos_data = QualitiesOfService.load() for user_input in self.qos: found = False - for qos in qos_data: + for qos in qos_data.values(): if (qos.id == user_input or qos.name == user_input or qos == user_input): @@ -189,24 +190,16 @@ cdef class JobFilter: JobSearchFilter = JobFilter -cdef class Jobs(list): +cdef class Jobs(MultiClusterMap): def __init__(self, jobs=None): - if isinstance(jobs, list): - for job in jobs: - if isinstance(job, int): - self.append(Job(job)) - else: - self.append(job) - elif isinstance(jobs, str): - joblist = jobs.split(",") - self.extend([Job(job) for job in joblist]) - elif isinstance(jobs, dict): - self.extend([job for job in jobs.values()]) - elif jobs is not None: - raise TypeError("Invalid Type: {type(jobs)}") - - def as_dict(self, recursive=False, group_by_cluster=False): + super().__init__(data=jobs, + 
typ="Jobs", + val_type=Job, + id_attr=Job.id, + key_type=int) + + def as_dict(self, recursive=False): """Convert the collection data to a dict. Args: @@ -214,29 +207,11 @@ cdef class Jobs(list): By default, the objects will not be converted to a dict. If this is set to `True`, then additionally all objects are converted to dicts. - group_by_cluster (bool, optional): - By default, only the Jobs from your local Cluster are - returned. If this is set to `True`, then all the Jobs in the - collection will be grouped by the Cluster - with the name of - the cluster as the key and the value being the collection as - another dict. Returns: (dict): Collection as a dict. """ - col = collection_to_dict(self, identifier=Job.id, recursive=recursive) - if not group_by_cluster: - return col.get(LOCAL_CLUSTER, {}) - - return col - - def group_by_cluster(self): - """Group Jobs by cluster name - - Returns: - (dict[str, Jobs]): Jobs grouped by cluster. - """ - return group_collection_by_cluster(self) + return super().as_dict(recursive) @staticmethod def load(JobFilter db_filter=None, Connection db_connection=None): @@ -280,7 +255,7 @@ cdef class Jobs(list): SlurmList job_data SlurmListItem job_ptr Connection conn - dict qos_data + QualitiesOfService qos_data # Prepare SQL Filter if not db_filter: @@ -297,15 +272,14 @@ cdef class Jobs(list): # Fetch other necessary dependencies needed for translating some # attributes (i.e QoS IDs to its name) - qos_data = QualitiesOfService.load(db_connection=conn).as_dict( - name_is_key=False) + qos_data = QualitiesOfService.load(db_connection=conn, + name_is_key=False) # TODO: also get trackable resources with slurmdb_tres_get and store # it in each job instance. tres_alloc_str and tres_req_str only # contain the numeric tres ids, but it probably makes more sense to # convert them to its type name for the user in advance. 
- # TODO: For multi-cluster support, remove duplicate federation jobs # TODO: How to handle the possibility of duplicate job ids that could # appear if IDs on a cluster are resetted? for job_ptr in SlurmList.iter_and_pop(job_data): @@ -313,7 +287,11 @@ cdef class Jobs(list): job.qos_data = qos_data job._create_steps() JobStatistics._sum_step_stats_for_job(job, job.steps) - out.append(job) + + cluster = job.cluster + if cluster not in out.data: + out.data[cluster] = {} + out[cluster][job.id] = job return out @@ -420,7 +398,7 @@ cdef class Jobs(list): # # " submitted at " # - # We are just interest in the Job-ID, so extract it + # We are just interested in the Job-ID, so extract it job_id = response_str.split(" ")[0] if job_id and job_id.isdigit(): out.append(int(job_id)) @@ -444,10 +422,11 @@ cdef class Job: def __cinit__(self): self.ptr = NULL - def __init__(self, job_id=0, cluster=LOCAL_CLUSTER, **kwargs): + def __init__(self, job_id=0, cluster=None, **kwargs): self._alloc_impl() self.ptr.jobid = int(job_id) - cstr.fmalloc(&self.ptr.cluster, cluster) + cstr.fmalloc(&self.ptr.cluster, + LOCAL_CLUSTER if not cluster else cluster) for k, v in kwargs.items(): setattr(self, k, v) diff --git a/pyslurm/db/qos.pxd b/pyslurm/db/qos.pxd index 9cb3df86..ea0fde2d 100644 --- a/pyslurm/db/qos.pxd +++ b/pyslurm/db/qos.pxd @@ -44,7 +44,7 @@ from pyslurm.utils cimport cstr cdef _set_qos_list(List *in_list, vals, QualitiesOfService data) -cdef class QualitiesOfService(list): +cdef class QualitiesOfService(dict): pass diff --git a/pyslurm/db/qos.pyx b/pyslurm/db/qos.pyx index a01ef9b0..9eba5add 100644 --- a/pyslurm/db/qos.pyx +++ b/pyslurm/db/qos.pyx @@ -25,14 +25,15 @@ from pyslurm.core.error import RPCError from pyslurm.utils.helpers import instance_to_dict, collection_to_dict_global from pyslurm.db.connection import _open_conn_or_error +from pyslurm import collections -cdef class QualitiesOfService(list): +cdef class QualitiesOfService(dict): def __init__(self): pass - def 
as_dict(self, recursive=False, name_is_key=True): + def as_dict(self, recursive=False): """Convert the collection data to a dict. Args: @@ -40,24 +41,24 @@ cdef class QualitiesOfService(list): By default, the objects will not be converted to a dict. If this is set to `True`, then additionally all objects are converted to dicts. - name_is_key (bool, optional): - By default, the keys in this dict are the names of each QoS. - If this is set to `False`, then the unique ID of the QoS will - be used as dict keys. Returns: (dict): Collection as a dict. """ - identifier = QualityOfService.name - if not name_is_key: - identifier = QualityOfService.id + return self if not recursive else collections.dict_recursive(self) - return collection_to_dict_global(self, identifier=identifier, - recursive=recursive) @staticmethod def load(QualityOfServiceFilter db_filter=None, - Connection db_connection=None): + Connection db_connection=None, name_is_key=True): + """Load QoS data from the Database + + Args: + name_is_key (bool, optional): + By default, the keys in this dict are the names of each QoS. + If this is set to `False`, then the unique ID of the QoS will + be used as dict keys. 
+ """ cdef: QualitiesOfService out = QualitiesOfService() QualityOfService qos @@ -83,7 +84,8 @@ cdef class QualitiesOfService(list): # Setup QOS objects for qos_ptr in SlurmList.iter_and_pop(qos_data): qos = QualityOfService.from_ptr(qos_ptr.data) - out.append(qos) + _id = qos.name if name_is_key else qos.id + out[_id] = qos return out @@ -227,7 +229,7 @@ def _qos_names_to_ids(qos_list, QualitiesOfService data): def _validate_qos_single(qid, QualitiesOfService data): - for item in data: + for item in data.values(): if qid == item.id or qid == item.name: return item.id diff --git a/pyslurm/db/tres.pxd b/pyslurm/db/tres.pxd index 41ed1b4d..ef1568f6 100644 --- a/pyslurm/db/tres.pxd +++ b/pyslurm/db/tres.pxd @@ -42,7 +42,7 @@ from pyslurm.db.connection cimport Connection cdef find_tres_count(char *tres_str, typ, on_noval=*, on_inf=*) cdef find_tres_limit(char *tres_str, typ) cdef merge_tres_str(char **tres_str, typ, val) -cdef _tres_ids_to_names(char *tres_str, dict tres_data) +cdef _tres_ids_to_names(char *tres_str, TrackableResources tres_data) cdef _set_tres_limits(char **dest, TrackableResourceLimits src, TrackableResources tres_data) @@ -62,7 +62,7 @@ cdef class TrackableResourceLimits: license @staticmethod - cdef from_ids(char *tres_id_str, dict tres_data) + cdef from_ids(char *tres_id_str, TrackableResources tres_data) cdef class TrackableResourceFilter: diff --git a/pyslurm/db/tres.pyx b/pyslurm/db/tres.pyx index df93dda0..cea5904e 100644 --- a/pyslurm/db/tres.pyx +++ b/pyslurm/db/tres.pyx @@ -56,7 +56,7 @@ cdef class TrackableResourceLimits: setattr(self, k, v) @staticmethod - cdef from_ids(char *tres_id_str, dict tres_data): + cdef from_ids(char *tres_id_str, TrackableResources tres_data): tres_list = _tres_ids_to_names(tres_id_str, tres_data) if not tres_list: return None @@ -139,7 +139,7 @@ cdef class TrackableResources(list): def __init__(self): pass - def as_dict(self, recursive=False, name_is_key=True): + def as_dict(self, recursive=False): """Convert 
the collection data to a dict. Args: @@ -147,23 +147,22 @@ cdef class TrackableResources(list): By default, the objects will not be converted to a dict. If this is set to `True`, then additionally all objects are converted to dicts. - name_is_key (bool, optional): - By default, the keys in this dict are the names of each TRES. - If this is set to `False`, then the unique ID of the TRES will - be used as dict keys. Returns: (dict): Collection as a dict. """ - identifier = TrackableResource.type_and_name - if not name_is_key: - identifier = TrackableResource.id - - return collection_to_dict_global(self, identifier=identifier, - recursive=recursive) + return self if not recursive else collections.dict_recursive(self) @staticmethod - def load(Connection db_connection=None): + def load(Connection db_connection=None, name_is_key=True): + """Load Trackable Resources from the Database. + + Args: + name_is_key (bool, optional): + By default, the keys in this dict are the names of each TRES. + If this is set to `False`, then the unique ID of the TRES will + be used as dict keys. 
+ """ cdef: TrackableResources out = TrackableResources() TrackableResource tres @@ -188,7 +187,8 @@ cdef class TrackableResources(list): for tres_ptr in SlurmList.iter_and_pop(tres_data): tres = TrackableResource.from_ptr( tres_ptr.data) - out.append(tres) + _id = tres.type_and_name if name_is_key else tres.id + out[_id] = tres return out @@ -307,7 +307,7 @@ cdef merge_tres_str(char **tres_str, typ, val): cstr.from_dict(tres_str, current) -cdef _tres_ids_to_names(char *tres_str, dict tres_data): +cdef _tres_ids_to_names(char *tres_str, TrackableResources tres_data): if not tres_str: return None @@ -342,7 +342,7 @@ def _tres_names_to_ids(dict tres_dict, TrackableResources tres_data): def _validate_tres_single(tid, TrackableResources tres_data): - for tres in tres_data: + for tres in tres_data.values(): if tid == tres.id or tid == tres.type_and_name: return tres.id diff --git a/tests/unit/test_db_job.py b/tests/unit/test_db_job.py index 7b77671f..fc097839 100644 --- a/tests/unit/test_db_job.py +++ b/tests/unit/test_db_job.py @@ -45,7 +45,6 @@ def test_filter(): def test_create_collection(): jobs = pyslurm.db.Jobs("101,102") assert len(jobs) == 2 - jobs = jobs.as_dict() assert 101 in jobs assert 102 in jobs assert jobs[101].id == 101 @@ -53,7 +52,6 @@ def test_create_collection(): jobs = pyslurm.db.Jobs([101, 102]) assert len(jobs) == 2 - jobs = jobs.as_dict() assert 101 in jobs assert 102 in jobs assert jobs[101].id == 101 @@ -66,7 +64,6 @@ def test_create_collection(): } ) assert len(jobs) == 2 - jobs = jobs.as_dict() assert 101 in jobs assert 102 in jobs assert jobs[101].id == 101 From 76eecac8c6a17497155b65b0937e5f26863af80a Mon Sep 17 00:00:00 2001 From: Toni Harzendorf Date: Sun, 9 Jul 2023 18:59:57 +0200 Subject: [PATCH 19/28] wip --- pyslurm/collections.pyx | 91 +++++--- pyslurm/core/job/job.pyx | 23 +- pyslurm/core/job/step.pyx | 4 + pyslurm/core/node.pyx | 20 +- pyslurm/core/partition.pxd | 2 +- pyslurm/core/partition.pyx | 26 +-- pyslurm/db/assoc.pyx | 3 + 
pyslurm/db/job.pyx | 17 +- pyslurm/db/qos.pyx | 9 +- tests/integration/test_job.py | 2 +- tests/integration/test_node.py | 6 +- tests/integration/test_partition.py | 17 +- tests/unit/test_collection.py | 318 ++++++++++++++++++++++++++++ tests/unit/test_db_job.py | 29 --- tests/unit/test_db_qos.py | 5 - tests/unit/test_node.py | 28 --- tests/unit/test_partition.py | 28 --- 17 files changed, 430 insertions(+), 198 deletions(-) create mode 100644 tests/unit/test_collection.py diff --git a/pyslurm/collections.pyx b/pyslurm/collections.pyx index 5681a849..6af3d681 100644 --- a/pyslurm/collections.pyx +++ b/pyslurm/collections.pyx @@ -23,6 +23,7 @@ # cython: language_level=3 from pyslurm.db.cluster import LOCAL_CLUSTER +import json class BaseView: @@ -42,14 +43,18 @@ class BaseView: class ValuesView(BaseView): def __contains__(self, val): - for item in self._mcm: + # for item in self._mcm + for item in self: if item is val or item == val: return True return False def __iter__(self): - for item in self._mcm: - yield item +# for item in self._mcm: +# yield item + for cluster in self._mcm.data.values(): + for item in cluster.values(): + yield item class MCKeysView(BaseView): @@ -64,6 +69,18 @@ class MCKeysView(BaseView): yield (cluster, key) +class ClustersView(BaseView): + + def __contains__(self, item): + return item in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + yield from self._data + + class KeysView(BaseView): def __contains__(self, item): @@ -161,6 +178,11 @@ cdef class MultiClusterMap: def _item_id(self, item): return self._id_attr.__get__(item) + def __eq__(self, other): + if isinstance(other, self.__class__): + return self.data == other.data + return NotImplemented + def __getitem__(self, item): if item in self.data: return self.data[item] @@ -186,13 +208,18 @@ cdef class MultiClusterMap: return sum(len(data) for data in self.data.values()) def __repr__(self): - return f'{self._typ}([{", ".join(map(repr, self))}])' + return 
f'{self._typ}([{", ".join(map(repr, self.values()))}])' def __contains__(self, item): if isinstance(item, self._val_type): return self._check_for_value(self._item_id(item), item.cluster) elif isinstance(item, self._key_type): - return self._check_for_value(item, self._get_cluster()) + found = False + for cluster, data in self.data.items(): + if item in data: + found = True + return found + #return self._check_for_value(item, self._get_cluster()) elif isinstance(item, tuple): cluster, item = item return self._check_for_value(item, cluster) @@ -226,9 +253,7 @@ cdef class MultiClusterMap: return out def __iter__(self): - for cluster in self.data.values(): - for item in cluster.values(): - yield item + return iter(self.keys()) def __bool__(self): return bool(self.data) @@ -259,17 +284,25 @@ cdef class MultiClusterMap: if not self.data[cluster]: del self.data[cluster] - def as_dict(self, recursive=False, multi_cluster=False): - cdef dict out = self.data.get(self._get_cluster(), {}) +# def as_dict(self, recursive=False, multi_cluster=False): +# cdef dict out = self.data.get(self._get_cluster(), {}) - if multi_cluster: - if recursive: - return multi_dict_recursive(self) - return self.data - elif recursive: - return dict_recursive(out) +# if multi_cluster: +# if recursive: +# return multi_dict_recursive(self) +# return self.data +# elif recursive: +# return dict_recursive(out) - return out +# return out + + def to_json(self, multi_cluster=False): + data = multi_dict_recursive(self) + if multi_cluster: + return json.dumps(data) + else: + cluster = self._get_cluster() + return json.dumps(data[cluster]) def keys(self): return KeysView(self) @@ -278,11 +311,14 @@ cdef class MultiClusterMap: return ItemsView(self) def values(self): - return self + return ValuesView(self) + + def clusters(self): + return ClustersView(self) def popitem(self): try: - item = next(iter(self)) + item = next(iter(self.values())) except StopIteration: raise KeyError from None @@ -294,10 +330,10 @@ cdef 
class MultiClusterMap: def pop(self, key, cluster=None, default=None): item = self.get(key, cluster=cluster, default=default) - if not item: + if item is default or item == default: return default - del self.data[cluster][key] + del self.data[item.cluster][key] return item def _check_val_type(self, item): @@ -333,7 +369,6 @@ cdef class MultiClusterMap: def update(self, data=None, cluster=None, **kwargs): if data: self._update(data, cluster) - if kwargs: self._update(kwargs, cluster) @@ -364,18 +399,14 @@ def dict_recursive(collection): return out +def to_json(collection): + return json.dumps(dict_recursive(collection)) + + def multi_dict_recursive(collection): cdef dict out = collection.data.copy() for cluster, data in collection.data.items(): out[cluster] = dict_recursive(data) -# if group_id: -# grp_id = group_id.__get__(item) -# if grp_id not in out[cluster]: -# out[cluster][grp_id] = {} -# out[cluster][grp_id].update({_id: data}) -# else: -# out[cluster][_id] = data - return out diff --git a/pyslurm/core/job/job.pyx b/pyslurm/core/job/job.pyx index d7965481..f2661d57 100644 --- a/pyslurm/core/job/job.pyx +++ b/pyslurm/core/job/job.pyx @@ -69,20 +69,6 @@ cdef class Jobs(MultiClusterMap): id_attr=Job.id, key_type=int) - def as_dict(self, recursive=False): - """Convert the collection data to a dict. - - Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. If - this is set to `True`, then additionally all objects are - converted to dicts. - - Returns: - (dict): Collection as a dict. 
- """ - return super().as_dict(recursive) - @staticmethod def load(preload_passwd_info=False, frozen=False): """Retrieve all Jobs from the Slurm controller @@ -201,7 +187,7 @@ cdef class Job: self.passwd = {} self.groups = {} cstr.fmalloc(&self.ptr.cluster, LOCAL_CLUSTER) - self.steps = JobSteps.__new__(JobSteps) + self.steps = JobSteps() def _alloc_impl(self): if not self.ptr: @@ -216,10 +202,8 @@ cdef class Job: def __dealloc__(self): self._dealloc_impl() - def __eq__(self, other): - if isinstance(other, Job): - return self.id == other.id and self.cluster == other.cluster - return NotImplemented + def __repr__(self): + return f'{self.__class__.__name__}({self.id})' @staticmethod def load(job_id): @@ -284,7 +268,6 @@ cdef class Job: wrap.groups = {} wrap.steps = JobSteps.__new__(JobSteps) memcpy(wrap.ptr, in_ptr, sizeof(slurm_job_info_t)) - return wrap cdef _swap_data(Job dst, Job src): diff --git a/pyslurm/core/job/step.pyx b/pyslurm/core/job/step.pyx index fbd6ada6..81b84a0d 100644 --- a/pyslurm/core/job/step.pyx +++ b/pyslurm/core/job/step.pyx @@ -156,6 +156,7 @@ cdef class JobStep: self._alloc_impl() self.job_id = job_id.id if isinstance(job_id, Job) else job_id self.id = step_id + cstr.fmalloc(&self.ptr.cluster, LOCAL_CLUSTER) # Initialize attributes, if any were provided for k, v in kwargs.items(): @@ -199,6 +200,9 @@ cdef class JobStep: # Call descriptors __set__ directly JobStep.__dict__[name].__set__(self, val) + def __repr__(self): + return f'{self.__class__.__name__}({self.id})' + @staticmethod def load(job_id, step_id): """Load information for a specific job step. diff --git a/pyslurm/core/node.pyx b/pyslurm/core/node.pyx index 9ab348f6..24c8fcee 100644 --- a/pyslurm/core/node.pyx +++ b/pyslurm/core/node.pyx @@ -62,20 +62,6 @@ cdef class Nodes(MultiClusterMap): id_attr=Node.name, key_type=str) - def as_dict(self, recursive=False): - """Convert the collection data to a dict. 
- - Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. If - this is set to `True`, then additionally all objects are - converted to dicts. - - Returns: - (dict): Collection as a dict. - """ - return super().as_dict(recursive) - @staticmethod def load(preload_passwd_info=False): """Load all nodes in the system. @@ -99,7 +85,7 @@ cdef class Nodes(MultiClusterMap): cdef: dict passwd = {} dict groups = {} - Nodes nodes = Nodes.__new__(Nodes) + Nodes nodes = Nodes() int flags = slurm.SHOW_ALL Node node @@ -262,8 +248,8 @@ cdef class Node: # Call descriptors __set__ directly Node.__dict__[name].__set__(self, val) - def __eq__(self, other): - return isinstance(other, Node) and self.name == other.name + def __repr__(self): + return f'{self.__class__.__name__}({self.name})' @staticmethod cdef Node from_ptr(node_info_t *in_ptr): diff --git a/pyslurm/core/partition.pxd b/pyslurm/core/partition.pxd index 6e95de85..147a589a 100644 --- a/pyslurm/core/partition.pxd +++ b/pyslurm/core/partition.pxd @@ -168,7 +168,7 @@ cdef class Partition: This can also return [UNLIMITED][pyslurm.constants.UNLIMITED] min_nodes (int): Minimum number of Nodes that must be requested by Jobs - max_time_limit (int): + max_time (int): Max Time-Limit in minutes that Jobs can request This can also return [UNLIMITED][pyslurm.constants.UNLIMITED] diff --git a/pyslurm/core/partition.pyx b/pyslurm/core/partition.pyx index 97b11a7b..18a6d132 100644 --- a/pyslurm/core/partition.pyx +++ b/pyslurm/core/partition.pyx @@ -62,20 +62,6 @@ cdef class Partitions(MultiClusterMap): id_attr=Partition.name, key_type=str) - def as_dict(self, recursive=False): - """Convert the collection data to a dict. - - Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. If - this is set to `True`, then additionally all objects are - converted to dicts. - - Returns: - (dict): Collection as a dict. 
- """ - return super().as_dict(recursive) - @staticmethod def load(): """Load all Partitions in the system. @@ -248,11 +234,11 @@ cdef class Partition: >>> import pyslurm >>> part = pyslurm.Partition.load("normal") """ - partitions = Partitions.load().as_dict() - if name not in partitions: + part = Partitions.load().get(name) + if not part: raise RPCError(msg=f"Partition '{name}' doesn't exist") - return partitions[name] + return part def create(self): """Create a Partition. @@ -523,11 +509,11 @@ cdef class Partition: self.ptr.min_nodes = u32(val, zero_is_noval=False) @property - def max_time_limit(self): + def max_time(self): return _raw_time(self.ptr.max_time, on_inf=UNLIMITED) - @max_time_limit.setter - def max_time_limit(self, val): + @max_time.setter + def max_time(self, val): self.ptr.max_time = timestr_to_mins(val) @property diff --git a/pyslurm/db/assoc.pyx b/pyslurm/db/assoc.pyx index 820fe998..d9426944 100644 --- a/pyslurm/db/assoc.pyx +++ b/pyslurm/db/assoc.pyx @@ -221,6 +221,9 @@ cdef class Association: slurmdb_init_assoc_rec(self.ptr, 0) + def __repr__(self): + return f'{self.__class__.__name__}({self.id})' + @staticmethod cdef Association from_ptr(slurmdb_assoc_rec_t *in_ptr): cdef Association wrap = Association.__new__(Association) diff --git a/pyslurm/db/job.pyx b/pyslurm/db/job.pyx index 923cba1a..8567c89f 100644 --- a/pyslurm/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -427,6 +427,9 @@ cdef class Job: self.ptr.jobid = int(job_id) cstr.fmalloc(&self.ptr.cluster, LOCAL_CLUSTER if not cluster else cluster) + self.qos_data = QualitiesOfService() + self.steps = JobSteps() + self.stats = JobStatistics() for k, v in kwargs.items(): setattr(self, k, v) @@ -450,12 +453,14 @@ cdef class Job: return wrap @staticmethod - def load(job_id, cluster=LOCAL_CLUSTER, with_script=False, with_env=False): + def load(job_id, cluster=None, with_script=False, with_env=False): """Load the information for a specific Job from the Database. 
Args: job_id (int): ID of the Job to be loaded. + cluster (str): + Name of the Cluster to search in. Returns: (pyslurm.db.Job): Returns a new Database Job instance @@ -477,15 +482,16 @@ cdef class Job: >>> print(db_job.script) """ + cluster = LOCAL_CLUSTER if not cluster else cluster jfilter = JobFilter(ids=[int(job_id)], clusters=[cluster], with_script=with_script, with_env=with_env) - jobs = Jobs.load(jfilter) - if not jobs: + job = Jobs.load(jfilter).get(int(job_id), cluster=cluster) + if not job: raise RPCError(msg=f"Job {job_id} does not exist on " f"Cluster {cluster}") # TODO: There might be multiple entries when job ids were reset. - return jobs[0] + return job def _create_steps(self): cdef: @@ -521,6 +527,9 @@ cdef class Job: return out + def __repr__(self): + return f'{self.__class__.__name__}({self.id})' + def modify(self, changes, db_connection=None): """Modify a Slurm database Job. diff --git a/pyslurm/db/qos.pyx b/pyslurm/db/qos.pyx index 9eba5add..16db21a9 100644 --- a/pyslurm/db/qos.pyx +++ b/pyslurm/db/qos.pyx @@ -172,6 +172,9 @@ cdef class QualityOfService: wrap.ptr = in_ptr return wrap + def __repr__(self): + return f'{self.__class__.__name__}({self.name})' + def as_dict(self): """Database QualityOfService information formatted as a dictionary. @@ -197,11 +200,11 @@ cdef class QualityOfService: sucessful. 
""" qfilter = QualityOfServiceFilter(names=[name]) - qos_data = QualitiesOfService.load(qfilter) - if not qos_data: + qos = QualitiesOfService.load(qfilter).get(name) + if not qos: raise RPCError(msg=f"QualityOfService {name} does not exist") - return qos_data[0] + return qos @property def name(self): diff --git a/tests/integration/test_job.py b/tests/integration/test_job.py index cef42daf..15c4bdef 100644 --- a/tests/integration/test_job.py +++ b/tests/integration/test_job.py @@ -150,7 +150,7 @@ def test_get_job_queue(submit_job): # Submit 10 jobs, gather the job_ids in a list job_list = [submit_job() for i in range(10)] - jobs = Jobs.load().as_dict() + jobs = Jobs.load() for job in job_list: # Check to see if all the Jobs we submitted exist assert job.id in jobs diff --git a/tests/integration/test_node.py b/tests/integration/test_node.py index 49a69db2..f0ecf9b1 100644 --- a/tests/integration/test_node.py +++ b/tests/integration/test_node.py @@ -29,7 +29,7 @@ def test_load(): - name = Nodes.load()[0].name + name = Nodes.load().popitem().name # Now load the node info node = Node.load(name) @@ -56,7 +56,7 @@ def test_create(): def test_modify(): - node = Node(Nodes.load()[0].name) + node = Nodes.load().popitem() node.modify(Node(weight=10000)) assert Node.load(node.name).weight == 10000 @@ -69,4 +69,4 @@ def test_modify(): def test_parse_all(): - Node.load(Nodes.load()[0].name).as_dict() + Nodes.load().popitem().as_dict() diff --git a/tests/integration/test_partition.py b/tests/integration/test_partition.py index 8d7a4de4..f5697925 100644 --- a/tests/integration/test_partition.py +++ b/tests/integration/test_partition.py @@ -28,7 +28,7 @@ def test_load(): - part = Partitions.load()[0] + part = Partitions.load().popitem() assert part.name assert part.state @@ -49,7 +49,7 @@ def test_create_delete(): def test_modify(): - part = Partitions.load()[0] + part = Partitions.load().popitem() part.modify(Partition(default_time=120)) assert 
Partition.load(part.name).default_time == 120 @@ -57,8 +57,8 @@ def test_modify(): part.modify(Partition(default_time="1-00:00:00")) assert Partition.load(part.name).default_time == 24*60 - part.modify(Partition(default_time="UNLIMITED")) - assert Partition.load(part.name).default_time == "UNLIMITED" + part.modify(Partition(max_time="UNLIMITED")) + assert Partition.load(part.name).max_time == "UNLIMITED" part.modify(Partition(state="DRAIN")) assert Partition.load(part.name).state == "DRAIN" @@ -68,23 +68,22 @@ def test_modify(): def test_parse_all(): - Partitions.load()[0].as_dict() + Partitions.load().popitem().as_dict() def test_reload(): _partnames = [util.randstr() for i in range(3)] _tmp_parts = Partitions(_partnames) - for part in _tmp_parts: + for part in _tmp_parts.values(): part.create() all_parts = Partitions.load() assert len(all_parts) >= 3 my_parts = Partitions(_partnames[1:]).reload() - print(my_parts) assert len(my_parts) == 2 - for part in my_parts: + for part in my_parts.values(): assert part.state != "UNKNOWN" - for part in _tmp_parts: + for part in _tmp_parts.values(): part.delete() diff --git a/tests/unit/test_collection.py b/tests/unit/test_collection.py new file mode 100644 index 00000000..0de68b16 --- /dev/null +++ b/tests/unit/test_collection.py @@ -0,0 +1,318 @@ +######################################################################### +# test_collection.py - custom collection unit tests +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+ +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +"""test_collection.py - Unit test custom collection functionality.""" + +import pytest +import pyslurm + +LOCAL_CLUSTER = pyslurm.db.cluster.LOCAL_CLUSTER +OTHER_CLUSTER = "other_cluster" + + +class TestMultiClusterMap: + + def _create_collection(self): + data = { + LOCAL_CLUSTER: { + 1: pyslurm.db.Job(1), + 2: pyslurm.db.Job(2), + }, + OTHER_CLUSTER: { + 1: pyslurm.db.Job(1, cluster="other_cluster"), + 10: pyslurm.db.Job(10, cluster="other_cluster"), + } + } + col = pyslurm.db.Jobs() + col.data = data + return col + + def test_create(self): + jobs = pyslurm.db.Jobs("101,102") + assert len(jobs) == 2 + assert 101 in jobs + assert 102 in jobs + assert jobs[101].id == 101 + assert jobs[102].id == 102 + + jobs = pyslurm.db.Jobs([101, 102]) + assert len(jobs) == 2 + assert 101 in jobs + assert 102 in jobs + assert jobs[101].id == 101 + assert jobs[102].id == 102 + + jobs = pyslurm.db.Jobs( + { + 101: pyslurm.db.Job(101), + 102: pyslurm.db.Job(102), + } + ) + assert len(jobs) == 2 + assert 101 in jobs + assert 102 in jobs + assert jobs[101].id == 101 + assert jobs[102].id == 102 + assert True + + def test_add(self): + col = self._create_collection() + col_len = len(col) + + item = pyslurm.db.Job(20) + col.add(item) + + assert len(col[LOCAL_CLUSTER]) == 3 + assert len(col) == col_len+1 + + item = pyslurm.db.Job(20, cluster=OTHER_CLUSTER) + col.add(item) + + assert len(col[LOCAL_CLUSTER]) == 3 + assert len(col) == col_len+2 + + def test_remove(self): + col = self._create_collection() + col_len = len(col) + + item = 
pyslurm.db.Job(1) + col.remove(item) + + assert len(col[LOCAL_CLUSTER]) == 1 + assert len(col) == col_len-1 + + def test_get(self): + col = self._create_collection() + + item = col.get(1) + assert item is not None + assert isinstance(item, pyslurm.db.Job) + assert item.cluster == LOCAL_CLUSTER + + item = col.get(1, cluster=OTHER_CLUSTER) + assert item is not None + assert isinstance(item, pyslurm.db.Job) + assert item.cluster == OTHER_CLUSTER + + item = col.get(30) + assert item is None + + def test_keys(self): + col = self._create_collection() + + keys = col.keys() + keys_with_cluster = keys.with_cluster() + assert len(keys) == len(col) + + for k in keys: + assert k + + for cluster, k in keys_with_cluster: + assert cluster + assert cluster in col.data + assert k + + def test_values(self): + col = self._create_collection() + values = col.values() + + assert len(values) == len(col) + + for item in values: + assert item + print(item) + assert isinstance(item, pyslurm.db.Job) + assert item.cluster in col.data + + def test_getitem(self): + col = self._create_collection() + + item1 = col[LOCAL_CLUSTER][1] + item2 = col[1] + item3 = col[OTHER_CLUSTER][1] + + assert item1 + assert item2 + assert item3 + assert item1 == item2 + assert item1 != item3 + + with pytest.raises(KeyError): + item = col[30] + + with pytest.raises(KeyError): + item = col[OTHER_CLUSTER][30] + + def test_setitem(self): + col = self._create_collection() + col_len = len(col) + + item = pyslurm.db.Job(30) + col[item.id] = item + assert len(col[LOCAL_CLUSTER]) == 3 + assert len(col) == col_len+1 + + item = pyslurm.db.Job(50, cluster=OTHER_CLUSTER) + col[OTHER_CLUSTER][item.id] = item + assert len(col[OTHER_CLUSTER]) == 3 + assert len(col) == col_len+2 + + item = pyslurm.db.Job(100, cluster=OTHER_CLUSTER) + col[item] = item + assert len(col[OTHER_CLUSTER]) == 4 + assert len(col) == col_len+3 + + item = pyslurm.db.Job(101, cluster=OTHER_CLUSTER) + col[(item.cluster, item.id)] = item + assert 
len(col[OTHER_CLUSTER]) == 5 + assert len(col) == col_len+4 + + new_other_data = { + 1: pyslurm.db.Job(1), + 2: pyslurm.db.Job(2), + } + col[OTHER_CLUSTER] = new_other_data + assert len(col[OTHER_CLUSTER]) == 2 + assert len(col[LOCAL_CLUSTER]) == 3 + assert 1 in col[OTHER_CLUSTER] + assert 2 in col[OTHER_CLUSTER] + + def test_delitem(self): + col = self._create_collection() + col_len = len(col) + + del col[1] + assert len(col[LOCAL_CLUSTER]) == 1 + assert len(col) == col_len-1 + + del col[OTHER_CLUSTER][1] + assert len(col[OTHER_CLUSTER]) == 1 + assert len(col) == col_len-2 + + del col[OTHER_CLUSTER] + assert len(col) == 1 + assert OTHER_CLUSTER not in col.data + + def test_copy(self): + col = self._create_collection() + col_copy = col.copy() + assert col == col_copy + + def test_iter(self): + col = self._create_collection() + for k in col: + assert k + + def test_items(self): + col = self._create_collection() + for k, v in col.items(): + assert k + assert v + assert isinstance(v, pyslurm.db.Job) + + for c, k, v in col.items().with_cluster(): + assert c + assert k + assert v + assert isinstance(v, pyslurm.db.Job) + + def test_popitem(self): + col = self._create_collection() + col_len = len(col) + + item = col.popitem() + assert item + assert isinstance(item, pyslurm.db.Job) + assert len(col) == col_len-1 + + def test_update(self): + col = self._create_collection() + col_len = len(col) + + col_update = { + 30: pyslurm.db.Job(30), + 50: pyslurm.db.Job(50), + } + col.update(col_update) + assert len(col) == col_len+2 + assert len(col[LOCAL_CLUSTER]) == 4 + assert 30 in col + assert 50 in col + + col_update = { + "new_cluster": { + 80: pyslurm.db.Job(80, cluster="new_cluster"), + 50: pyslurm.db.Job(50, cluster="new_cluster"), + } + } + col.update(col_update) + assert len(col) == col_len+4 + assert len(col[LOCAL_CLUSTER]) == 4 + assert len(col["new_cluster"]) == 2 + assert 80 in col + assert 50 in col + + col_update = { + 200: pyslurm.db.Job(200, cluster=OTHER_CLUSTER), 
+ 300: pyslurm.db.Job(300, cluster=OTHER_CLUSTER), + } + col.update(col_update, cluster=OTHER_CLUSTER) + assert len(col) == col_len+6 + assert len(col[OTHER_CLUSTER]) == 4 + assert 200 in col + assert 300 in col + + empty_col = pyslurm.db.Jobs() + empty_col.update(col_update) + assert len(empty_col) == 2 + + def test_pop(self): + col = self._create_collection() + col_len = len(col) + + item = col.pop(1) + assert item + assert item.id == 1 + assert len(col) == col_len-1 + + item = col.pop(999, default="def") + assert item == "def" + + def test_contains(self): + col = self._create_collection() + item = pyslurm.db.Job(1) + assert item in col + + assert 10 in col + assert 20 not in col + + assert (OTHER_CLUSTER, 10) in col + assert (LOCAL_CLUSTER, 10) not in col + + def test_to_json(self): + col = self._create_collection() + data = col.to_json(multi_cluster=True) + assert data + + def test_cluster_view(self): + col = self._create_collection() + assert len(col.clusters()) == 2 + for c in col.clusters(): + assert c diff --git a/tests/unit/test_db_job.py b/tests/unit/test_db_job.py index fc097839..66ee61ef 100644 --- a/tests/unit/test_db_job.py +++ b/tests/unit/test_db_job.py @@ -42,35 +42,6 @@ def test_filter(): job_filter._create() -def test_create_collection(): - jobs = pyslurm.db.Jobs("101,102") - assert len(jobs) == 2 - assert 101 in jobs - assert 102 in jobs - assert jobs[101].id == 101 - assert jobs[102].id == 102 - - jobs = pyslurm.db.Jobs([101, 102]) - assert len(jobs) == 2 - assert 101 in jobs - assert 102 in jobs - assert jobs[101].id == 101 - assert jobs[102].id == 102 - - jobs = pyslurm.db.Jobs( - { - 101: pyslurm.db.Job(101), - 102: pyslurm.db.Job(102), - } - ) - assert len(jobs) == 2 - assert 101 in jobs - assert 102 in jobs - assert jobs[101].id == 101 - assert jobs[102].id == 102 - assert True - - def test_create_instance(): job = pyslurm.db.Job(9999) assert job.id == 9999 diff --git a/tests/unit/test_db_qos.py b/tests/unit/test_db_qos.py index 
0d2fd538..5ee2db76 100644 --- a/tests/unit/test_db_qos.py +++ b/tests/unit/test_db_qos.py @@ -39,11 +39,6 @@ def test_search_filter(): qos_filter._create() -def test_create_collection_instance(): - # TODO - assert True - - def test_create_instance(): qos = pyslurm.db.QualityOfService("test") assert qos.name == "test" diff --git a/tests/unit/test_node.py b/tests/unit/test_node.py index 755e85d9..a48460ae 100644 --- a/tests/unit/test_node.py +++ b/tests/unit/test_node.py @@ -35,34 +35,6 @@ def test_parse_all(): Node("localhost").as_dict() -def test_create_nodes_collection(): - nodes = Nodes("node1,node2").as_dict() - assert len(nodes) == 2 - assert "node1" in nodes - assert "node2" in nodes - assert nodes["node1"].name == "node1" - assert nodes["node2"].name == "node2" - - nodes = Nodes(["node1", "node2"]).as_dict() - assert len(nodes) == 2 - assert "node1" in nodes - assert "node2" in nodes - assert nodes["node1"].name == "node1" - assert nodes["node2"].name == "node2" - - nodes = Nodes( - { - "node1": Node("node1"), - "node2": Node("node2"), - } - ).as_dict() - assert len(nodes) == 2 - assert "node1" in nodes - assert "node2" in nodes - assert nodes["node1"].name == "node1" - assert nodes["node2"].name == "node2" - - def test_set_node_state(): assert _node_state_from_str("RESUME") assert _node_state_from_str("undrain") diff --git a/tests/unit/test_partition.py b/tests/unit/test_partition.py index 89403ae2..79c3e8d0 100644 --- a/tests/unit/test_partition.py +++ b/tests/unit/test_partition.py @@ -31,34 +31,6 @@ def test_create_instance(): assert part.name == "normal" -def test_create_collection(): - parts = Partitions("part1,part2").as_dict() - assert len(parts) == 2 - assert "part1" in parts - assert "part2" in parts - assert parts["part1"].name == "part1" - assert parts["part2"].name == "part2" - - parts = Partitions(["part1", "part2"]).as_dict() - assert len(parts) == 2 - assert "part1" in parts - assert "part2" in parts - assert parts["part1"].name == "part1" - 
assert parts["part2"].name == "part2" - - parts = Partitions( - { - "part1": Partition("part1"), - "part2": Partition("part2"), - } - ).as_dict() - assert len(parts) == 2 - assert "part1" in parts - assert "part2" in parts - assert parts["part1"].name == "part1" - assert parts["part2"].name == "part2" - - def test_parse_all(): Partition("normal").as_dict() From 299e13832e611a3f2a81e7a1fc846680bb101fda Mon Sep 17 00:00:00 2001 From: tazend Date: Mon, 10 Jul 2023 21:22:49 +0200 Subject: [PATCH 20/28] wip --- pyslurm/collections.pyx | 263 +++++++++++++++++++++++----------- tests/unit/test_collection.py | 12 +- 2 files changed, 178 insertions(+), 97 deletions(-) diff --git a/pyslurm/collections.pyx b/pyslurm/collections.pyx index 6af3d681..037886dd 100644 --- a/pyslurm/collections.pyx +++ b/pyslurm/collections.pyx @@ -24,10 +24,12 @@ from pyslurm.db.cluster import LOCAL_CLUSTER import json +import typing +from typing import Union class BaseView: - + """Base View for all other Views""" def __init__(self, mcm): self._mcm = mcm self._data = mcm.data @@ -41,36 +43,35 @@ class BaseView: class ValuesView(BaseView): + """A simple Value View + When iterating over an instance of this View, this will yield all values + from all clusters. 
+ """ def __contains__(self, val): - # for item in self._mcm - for item in self: - if item is val or item == val: - return True + try: + item = self._mcm.get( + key=self._mcm._item_id(val), + cluster=val.cluster + ) + return item is val or item == val + except AttributeError: + pass + return False def __iter__(self): -# for item in self._mcm: -# yield item for cluster in self._mcm.data.values(): for item in cluster.values(): yield item -class MCKeysView(BaseView): - - def __contains__(self, item): - cluster, key, = item - return key in self._data[cluster] - - def __iter__(self): - for cluster, keys in self._data.items(): - for key in keys: - yield (cluster, key) - - class ClustersView(BaseView): + """A simple Cluster-Keys View + When iterating over an instance of this View, it will yield all the + Cluster names of the collection. + """ def __contains__(self, item): return item in self._data @@ -81,8 +82,35 @@ class ClustersView(BaseView): yield from self._data +class MCKeysView(BaseView): + """A Multi-Cluster Keys View + + Unlike KeysView, when iterating over an MCKeysView instance, this will + yield a 2-tuple in the form (cluster, key). + + Similarly, when checking whether this View contains a Key with the `in` + operator, a 2-tuple must be used in the form described above. + """ + def __contains__(self, item): + cluster, key, = item + return key in self._data[cluster] + + def __iter__(self): + for cluster, keys in self._data.items(): + for key in keys: + yield (cluster, key) + + class KeysView(BaseView): + """A simple Keys View of a collection + When iterating, this yields all the keys found from each Cluster in the + collection. Note that unlike the KeysView from a `dict`, the keys here + aren't unique and may appear multiple times. + + If you indeed have multiple Clusters in a collection and need to tell the + keys apart, use the `with_cluster()` function. 
+ """ def __contains__(self, item): return item in self._mcm @@ -91,18 +119,28 @@ class KeysView(BaseView): yield from keys def with_cluster(self): + """Return a Multi-Cluster Keys View. + + Returns: + (MCKeysView): Multi-Cluster Keys View. + """ return MCKeysView(self._mcm) class ItemsView(BaseView): + """A simple Items View of a collection. + Returns a 2-tuple in the form of (key, value) when iterating. + + Similarly, when checking whether this View contains an Item with the `in` + operator, a 2-tuple must be used. + """ def __contains__(self, item): key, val = item - cluster = self._mcm._get_cluster() try: - out = self._mcm.data[cluster][key] - except KeyError: + out = self._mcm.data[item.cluster][key] + except (KeyError, AttributeError): return False else: return out is val or out == val @@ -113,11 +151,23 @@ class ItemsView(BaseView): yield (key, data[key]) def with_cluster(self): + """Return a Multi-Cluster Items View. + + Returns: + (MCItemsView): Multi-Cluster Items View. + """ return MCItemsView(self._mcm) class MCItemsView(BaseView): + """A Multi-Cluster Items View. + + This differs from ItemsView in that it returns a 3-tuple in the form of + (cluster, key, value) when iterating. + Similarly, when checking whether this View contains an Item with the `in` + operator, a 3-tuple must be used. 
+ """ def __contains__(self, item): cluster, key, val = item @@ -163,18 +213,41 @@ cdef class MultiClusterMap: elif isinstance(data, dict): self.update(data) elif data is not None: - raise TypeError(f"Invalid Type: {type(data)}") + raise TypeError(f"Invalid Type: {type(data).__name__}") - def _get_key_and_cluster(self, item): - cluster = self._get_cluster() - key = item + def _check_for_value(self, val_id, cluster): + cluster_data = self.data.get(cluster) + if cluster_data and val_id in cluster_data: + return True + return False + + def _get_cluster(self): + cluster = None + if not self.data or LOCAL_CLUSTER in self.data: + cluster = LOCAL_CLUSTER + else: + try: + cluster = next(iter(self.keys())) + except StopIteration: + raise KeyError("Collection is Empty") from None + + return cluster + def _get_key_and_cluster(self, item): if isinstance(item, self._val_type): cluster, key = item.cluster, self._item_id(item) elif isinstance(item, tuple) and len(item) == 2: cluster, key = item + else: + cluster, key = self._get_cluster(), item + return cluster, key + def _check_val_type(self, item): + if not isinstance(item, self._val_type): + raise TypeError(f"Invalid Type: {type(item).__name__}. 
" + f"{self._val_type}.__name__ is required.") + def _item_id(self, item): return self._id_attr.__get__(item) @@ -212,36 +285,32 @@ cdef class MultiClusterMap: def __contains__(self, item): if isinstance(item, self._val_type): - return self._check_for_value(self._item_id(item), item.cluster) + item = (item.cluster, self._item_id(item)) + return self.get(item, default=None) is not None + # return self._check_for_value(self._item_id(item), item.cluster) elif isinstance(item, self._key_type): found = False for cluster, data in self.data.items(): if item in data: found = True return found - #return self._check_for_value(item, self._get_cluster()) elif isinstance(item, tuple): - cluster, item = item - return self._check_for_value(item, cluster) + return self.get(item, default=None) is not None + # return self._check_for_value(item, cluster) return False - def _check_for_value(self, val_id, cluster): - cluster_data = self.data.get(cluster) - if cluster_data and val_id in cluster_data: - return True - return False + def __iter__(self): + return iter(self.keys()) - def _get_cluster(self): - if not self.data or LOCAL_CLUSTER in self.data: - return LOCAL_CLUSTER - else: - return next(iter(self.keys())) + def __bool__(self): + return bool(self.data) def __copy__(self): return self.copy() def copy(self): + """Return a Copy of this instance.""" out = self.__class__.__new__(self.__class__) super(self.__class__, out).__init__( data=self.data.copy(), @@ -252,51 +321,48 @@ cdef class MultiClusterMap: ) return out - def __iter__(self): - return iter(self.keys()) - - def __bool__(self): - return bool(self.data) - - def get(self, key, cluster=None, default=None): - cluster = self._get_cluster() if not cluster else cluster + def get(self, key, default=None): + """Get the specific value for a Key""" + cluster, key = self._get_key_and_cluster(key) return self.data.get(cluster, {}).get(key, default) def add(self, item): + """An Item to add to the collection + + Note that a collection can 
only hold its specific type. + For example, a collection of `pyslurm.Jobs` can only hold + `pyslurm.Job` objects. Trying to add anything other than the accepted + type will raise a TypeError. + + Args: + item (typing.Any): + Item to add to the collection. + + Raises: + TypeError: When an item with an unexpected type not belonging to + the collection was added. + + Examples: + # Add a `pyslurm.Job` instance to the `Jobs` collection. + + >>> data = pyslurm.Jobs() + >>> job = pyslurm.Job(1) + >>> data.add(job) + >>> print(data) + Jobs([Job(1)]) + """ if item.cluster not in self.data: self.data[item.cluster] = {} - self.data[item.cluster][self._item_id(item)] = item - - def remove(self, item): - cluster = self._get_cluster() - key = item - if isinstance(item, self._val_type): - if self._check_for_value(self._item_id(item), item.cluster): - cluster = item.cluster - del self.data[item.cluster][self._item_id(item)] - elif isinstance(item, tuple) and len(item) == 2: - cluster, key = item - del self.data[cluster][key] - elif isinstance(item, self._key_type): - del self.data[cluster][key] - - if not self.data[cluster]: - del self.data[cluster] - -# def as_dict(self, recursive=False, multi_cluster=False): -# cdef dict out = self.data.get(self._get_cluster(), {}) - -# if multi_cluster: -# if recursive: -# return multi_dict_recursive(self) -# return self.data -# elif recursive: -# return dict_recursive(out) - -# return out + self._check_val_type(item) + self.data[item.cluster][self._item_id(item)] = item def to_json(self, multi_cluster=False): + """Convert the whole collection to JSON. 
+ + Returns: + (str): JSON formatted string from `json.dumps()` + """ data = multi_dict_recursive(self) if multi_cluster: return json.dumps(data) @@ -305,18 +371,39 @@ cdef class MultiClusterMap: return json.dumps(data[cluster]) def keys(self): + """Return a View of all the Keys in this collection + + Returns: + (KeysView): View of all Keys + """ return KeysView(self) def items(self): + """Return a View of all the Items in this collection + + Returns: + (ItemsView): View of all Items + """ return ItemsView(self) def values(self): + """Return a View of all the Values in this collection + + Returns: + (ValuesView): View of all Values + """ return ValuesView(self) def clusters(self): + """Return a View of all the Clusters in this collection + + Returns: + (ClustersView): View of Cluster keys + """ return ClustersView(self) def popitem(self): + """Remove and return some item in the collection""" try: item = next(iter(self.values())) except StopIteration: @@ -326,21 +413,21 @@ cdef class MultiClusterMap: return item def clear(self): + """Clear the collection""" self.data.clear() - def pop(self, key, cluster=None, default=None): - item = self.get(key, cluster=cluster, default=default) + def pop(self, key, default=None): + """Remove key from the collection and return the value""" + item = self.get(key, default=default) if item is default or item == default: return default - del self.data[item.cluster][key] - return item - - def _check_val_type(self, item): - if not isinstance(item, self._val_type): - raise TypeError(f"Invalid Type: {type(item).__name__}. " - f"{self._val_type}.__name__ is required.") + cluster = item.cluster + del self.data[cluster][key] + if not self.data[cluster]: + del self.data[cluster] + return item def _update(self, data, clus): for key in data: @@ -367,6 +454,10 @@ cdef class MultiClusterMap: def update(self, data=None, cluster=None, **kwargs): + """Update the collection. + + Functions just like `dict`'s update method. 
+ """ if data: self._update(data, cluster) if kwargs: diff --git a/tests/unit/test_collection.py b/tests/unit/test_collection.py index 0de68b16..20285255 100644 --- a/tests/unit/test_collection.py +++ b/tests/unit/test_collection.py @@ -88,16 +88,6 @@ def test_add(self): assert len(col[LOCAL_CLUSTER]) == 3 assert len(col) == col_len+2 - def test_remove(self): - col = self._create_collection() - col_len = len(col) - - item = pyslurm.db.Job(1) - col.remove(item) - - assert len(col[LOCAL_CLUSTER]) == 1 - assert len(col) == col_len-1 - def test_get(self): col = self._create_collection() @@ -106,7 +96,7 @@ def test_get(self): assert isinstance(item, pyslurm.db.Job) assert item.cluster == LOCAL_CLUSTER - item = col.get(1, cluster=OTHER_CLUSTER) + item = col.get((OTHER_CLUSTER, 1)) assert item is not None assert isinstance(item, pyslurm.db.Job) assert item.cluster == OTHER_CLUSTER From 6670c9c2497fbe11695f4dc774e361da10ce712f Mon Sep 17 00:00:00 2001 From: tazend Date: Tue, 11 Jul 2023 20:50:31 +0200 Subject: [PATCH 21/28] wip --- docs/reference/collections.md | 6 +++ pyslurm/collections.pxd | 53 +++++++++++++++++++ pyslurm/collections.pyx | 95 ++++++++++++++++++++++++++++++----- pyslurm/db/job.pxd | 2 + pyslurm/db/job.pyx | 7 ++- tests/unit/test_collection.py | 4 +- tests/unit/test_db_job.py | 5 ++ tests/unit/test_job.py | 4 +- tests/unit/test_job_steps.py | 4 +- tests/unit/test_node.py | 2 +- tests/unit/test_partition.py | 2 +- 11 files changed, 160 insertions(+), 24 deletions(-) create mode 100644 docs/reference/collections.md diff --git a/docs/reference/collections.md b/docs/reference/collections.md new file mode 100644 index 00000000..70af6f48 --- /dev/null +++ b/docs/reference/collections.md @@ -0,0 +1,6 @@ +--- +title: collections +--- + +::: pyslurm.collections + handler: python diff --git a/pyslurm/collections.pxd b/pyslurm/collections.pxd index 5c43d932..eea38e30 100644 --- a/pyslurm/collections.pxd +++ b/pyslurm/collections.pxd @@ -24,7 +24,60 @@ cdef class 
MultiClusterMap: + """Mapping of Multi-Cluster Data for a Collection. + !!! note "TL;DR" + + If you have no need to write Multi-Cluster capable code and just work + on a single Cluster, Collections inheriting from this Class behave + just like a normal `dict`. + + This class enables collections to hold data from multiple Clusters if + applicable. + For quite a few Entities in Slurm it is possible to gather data from + multiple Clusters. For example, with `squeue`, you can easily list Jobs + running on different Clusters - provided your Cluster is joined in a + Federation or simply part of a multi Cluster Setup. + + Collections like `pyslurm.Jobs` inherit from this Class to enable holding + such data from multiple Clusters. + Internally, the data is structured in a `dict` like this (with + `pyslurm.Jobs` as an example): + + data = { + "LOCAL_CLUSTER": + 1: pyslurm.Job, + 2: pyslurm.Job, + ... + "OTHER_REMOTE_CLUSTER": + 100: pyslurm.Job, + 101, pyslurm.Job + ... + ... + } + + When a collection inherits from this class, its functionality will + basically simulate a standard `dict` - with a few extensions to enable + multi-cluster code. + By default, even if your Collections contains Data from multiple Clusters, + any operation will be targeted on the local Cluster data, if available. + + For example, with the data from above: + + >>> job = data[1] + + `job` would then hold the instance for Job 1 from the `LOCAL_CLUSTER` + data. + Alternatively, data can also be accessed like this: + + >>> job = data["OTHER_REMOTE_CLUSTER"][100] + + Here, you are directly specifying which Cluster data you want to access. 
+ + Similarly, every method (where applicable) from a standard dict is + extended with multi-cluster functionality (check out the examples on the + methods) + """ cdef public dict data cdef: diff --git a/pyslurm/collections.pyx b/pyslurm/collections.pyx index 037886dd..5630ff2b 100644 --- a/pyslurm/collections.pyx +++ b/pyslurm/collections.pyx @@ -21,6 +21,7 @@ # # cython: c_string_type=unicode, c_string_encoding=default # cython: language_level=3 +"""Custom Collection utilities""" from pyslurm.db.cluster import LOCAL_CLUSTER import json @@ -186,8 +187,8 @@ class MCItemsView(BaseView): cdef class MultiClusterMap: - def __init__(self, data, typ=None, - val_type=None, key_type=None, id_attr=None, init_data=True): + def __init__(self, data, typ=None, val_type=None, + key_type=None, id_attr=None, init_data=True): self.data = {} if init_data else data self._typ = typ self._key_type = key_type @@ -322,7 +323,30 @@ cdef class MultiClusterMap: return out def get(self, key, default=None): - """Get the specific value for a Key""" + """Get the specific value for a Key + + This behaves like `dict`'s `get` method, with the difference that you + can additionally pass in a 2-tuple in the form of `(cluster, key)` as + the key, which can be helpful if this collection contains data from + multiple Clusters. + + If just a key without notion of the Cluster is given, access to the + local cluster data is implied. If this collection does however not + contain data from the local cluster, the first cluster detected + according to `next(iter(self.keys()))` will be used. 
+ + Examples: + Get a Job from the LOCAL_CLUSTER + + >>> job_id = 1 + >>> job = data.get(job_id) + + Get a Job from another Cluster in the Collection, by providing a + 2-tuple with the cluster identifier: + + >>> job_id = 1 + >>> job = data.get(("REMOTE_CLUSTER", job_id)) + """ cluster, key = self._get_key_and_cluster(key) return self.data.get(cluster, {}).get(key, default) @@ -343,7 +367,7 @@ cdef class MultiClusterMap: the collection was added. Examples: - # Add a `pyslurm.Job` instance to the `Jobs` collection. + Add a `pyslurm.Job` instance to the `Jobs` collection. >>> data = pyslurm.Jobs() >>> job = pyslurm.Job(1) @@ -375,6 +399,18 @@ cdef class MultiClusterMap: Returns: (KeysView): View of all Keys + + Examples: + Iterate over all Keys from all Clusters: + + >>> for key in collection.keys() + ... print(key) + + Iterate over all Keys from all Clusters with the name of the + Cluster additionally provided: + + >>> for cluster, key in collection.keys().with_cluster() + ... print(cluster, key) """ return KeysView(self) @@ -383,6 +419,18 @@ cdef class MultiClusterMap: Returns: (ItemsView): View of all Items + + Examples: + Iterate over all Items from all Clusters: + + >>> for key, value in collection.items() + ... print(key, value) + + Iterate over all Items from all Clusters with the name of the + Cluster additionally provided: + + >>> for cluster, key, value in collection.items().with_cluster() + ... print(cluster, key, value) """ return ItemsView(self) @@ -391,6 +439,12 @@ cdef class MultiClusterMap: Returns: (ValuesView): View of all Values + + Examples: + Iterate over all Values from all Clusters: + + >>> for value in collection.values() + ... print(value) """ return ValuesView(self) @@ -399,6 +453,12 @@ cdef class MultiClusterMap: Returns: (ClustersView): View of Cluster keys + + Examples: + Iterate over all Cluster-Names the Collection contains: + + >>> for cluster in collection.clusters() + ... 
print(cluster) """ return ClustersView(self) @@ -417,7 +477,18 @@ cdef class MultiClusterMap: self.data.clear() def pop(self, key, default=None): - """Remove key from the collection and return the value""" + """Remove key from the collection and return the value + + This behaves like `dict`'s `pop` method, with the difference that you + can additionally pass in a 2-tuple in the form of `(cluster, key)` as + the key, which can be helpful if this collection contains data from + multiple Clusters. + + If just a key without notion of the Cluster is given, access to the + local cluster data is implied. If this collection does however not + contain data from the local cluster, the first cluster detected + according to `next(iter(self.keys()))` will be used. + """ item = self.get(key, default=default) if item is default or item == default: return default @@ -429,12 +500,12 @@ cdef class MultiClusterMap: return item - def _update(self, data, clus): + def _update(self, data): for key in data: try: iterator = iter(data[key]) except TypeError as e: - cluster = self._get_cluster() if not clus else clus + cluster = self._get_cluster() if not cluster in self.data: self.data[cluster] = {} self.data[cluster].update(data) @@ -453,15 +524,13 @@ cdef class MultiClusterMap: # k, v = item - def update(self, data=None, cluster=None, **kwargs): + def update(self, data={}, **kwargs): """Update the collection. - Functions just like `dict`'s update method. + This functions like `dict`'s `update` method. """ - if data: - self._update(data, cluster) - if kwargs: - self._update(kwargs, cluster) + self._update(data) + self._update(kwargs) def multi_reload(cur, frozen=True): diff --git a/pyslurm/db/job.pxd b/pyslurm/db/job.pxd index ed34dab4..e862703d 100644 --- a/pyslurm/db/job.pxd +++ b/pyslurm/db/job.pxd @@ -162,6 +162,8 @@ cdef class Job: Args: job_id (int, optional=0): An Integer representing a Job-ID. + cluster (str, optional=None): + Name of the Cluster for this Job. 
Other Parameters: admin_comment (str): diff --git a/pyslurm/db/job.pyx b/pyslurm/db/job.pyx index 8567c89f..3b546010 100644 --- a/pyslurm/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -461,6 +461,12 @@ cdef class Job: ID of the Job to be loaded. cluster (str): Name of the Cluster to search in. + with_script (bool): + Whether the Job-Script should also be loaded. Mutually + exclusive with `with_env`. + with_env (bool): + Whether the Job Environment should also be loaded. Mutually + exclusive with `with_script`. Returns: (pyslurm.db.Job): Returns a new Database Job instance @@ -480,7 +486,6 @@ cdef class Job: >>> import pyslurm >>> db_job = pyslurm.db.Job.load(10000, with_script=True) >>> print(db_job.script) - """ cluster = LOCAL_CLUSTER if not cluster else cluster jfilter = JobFilter(ids=[int(job_id)], clusters=[cluster], diff --git a/tests/unit/test_collection.py b/tests/unit/test_collection.py index 20285255..83bc19c3 100644 --- a/tests/unit/test_collection.py +++ b/tests/unit/test_collection.py @@ -41,7 +41,7 @@ def _create_collection(self): } } col = pyslurm.db.Jobs() - col.data = data + col.update(data) return col def test_create(self): @@ -263,7 +263,7 @@ def test_update(self): 200: pyslurm.db.Job(200, cluster=OTHER_CLUSTER), 300: pyslurm.db.Job(300, cluster=OTHER_CLUSTER), } - col.update(col_update, cluster=OTHER_CLUSTER) + col.update({OTHER_CLUSTER: col_update}) assert len(col) == col_len+6 assert len(col[OTHER_CLUSTER]) == 4 assert 200 in col diff --git a/tests/unit/test_db_job.py b/tests/unit/test_db_job.py index 66ee61ef..61a0a573 100644 --- a/tests/unit/test_db_job.py +++ b/tests/unit/test_db_job.py @@ -45,3 +45,8 @@ def test_filter(): def test_create_instance(): job = pyslurm.db.Job(9999) assert job.id == 9999 + + +def test_parse_all(): + job = pyslurm.db.Job(9999) + assert job.as_dict() diff --git a/tests/unit/test_job.py b/tests/unit/test_job.py index edcf65d4..6d5d561a 100644 --- a/tests/unit/test_job.py +++ b/tests/unit/test_job.py @@ -31,9 +31,7 @@ def 
test_create_instance(): def test_parse_all(): - # Use the as_dict() function to test if parsing works for all - # properties on a simple Job without error. - Job(9999).as_dict() + assert Job(9999).as_dict() def test_parse_dependencies_to_dict(): diff --git a/tests/unit/test_job_steps.py b/tests/unit/test_job_steps.py index fcd0d012..d7fc5c8d 100644 --- a/tests/unit/test_job_steps.py +++ b/tests/unit/test_job_steps.py @@ -39,6 +39,4 @@ def test_create_instance(): def test_parse_all(): - # Use the as_dict() function to test if parsing works for all - # properties on a simple JobStep without error. - JobStep(9999, 1).as_dict() + assert JobStep(9999, 1).as_dict() diff --git a/tests/unit/test_node.py b/tests/unit/test_node.py index a48460ae..e31ffea4 100644 --- a/tests/unit/test_node.py +++ b/tests/unit/test_node.py @@ -32,7 +32,7 @@ def test_create_instance(): def test_parse_all(): - Node("localhost").as_dict() + assert Node("localhost").as_dict() def test_set_node_state(): diff --git a/tests/unit/test_partition.py b/tests/unit/test_partition.py index 79c3e8d0..4f0624e7 100644 --- a/tests/unit/test_partition.py +++ b/tests/unit/test_partition.py @@ -32,7 +32,7 @@ def test_create_instance(): def test_parse_all(): - Partition("normal").as_dict() + assert Partition("normal").as_dict() def test_parse_memory(): From 4e0d77d8ee66212271dfe1e50c74162737578d37 Mon Sep 17 00:00:00 2001 From: tazend Date: Wed, 12 Jul 2023 21:47:40 +0200 Subject: [PATCH 22/28] wip --- docs/reference/collections.md | 10 ++++ docs/reference/config.md | 1 - docs/reference/constants.md | 2 - docs/reference/db/cluster.md | 1 - docs/reference/db/connection.md | 1 - docs/reference/db/event.md | 1 - docs/reference/db/job.md | 3 - docs/reference/db/jobfilter.md | 1 - docs/reference/db/jobstats.md | 1 - docs/reference/db/jobstep.md | 3 - docs/reference/db/reservation.md | 1 - docs/reference/exceptions.md | 3 - docs/reference/frontend.md | 1 - docs/reference/hostlist.md | 1 - docs/reference/job.md | 3 - 
docs/reference/jobstep.md | 3 - docs/reference/jobsubmitdescription.md | 1 - docs/reference/node.md | 3 - docs/reference/partition.md | 3 - docs/reference/reservation.md | 1 - docs/reference/statistics.md | 1 - docs/reference/topology.md | 1 - docs/reference/trigger.md | 1 - docs/reference/utilities.md | 24 +------- docs/stylesheets/extra.css | 5 ++ mkdocs.yml | 2 + pyslurm/collections.pxd | 10 +++- pyslurm/collections.pyx | 10 ++-- pyslurm/core/job/job.pxd | 2 +- pyslurm/core/job/job.pyx | 3 + pyslurm/core/job/step.pyx | 17 +----- pyslurm/core/node.pxd | 2 +- pyslurm/core/node.pyx | 7 ++- pyslurm/core/partition.pxd | 2 +- pyslurm/core/partition.pyx | 5 +- pyslurm/db/assoc.pyx | 20 +------ pyslurm/db/job.pxd | 2 +- pyslurm/db/job.pyx | 29 +++------- pyslurm/db/qos.pyx | 20 +------ pyslurm/db/stats.pyx | 2 +- pyslurm/db/step.pyx | 9 ++- pyslurm/db/tres.pxd | 2 +- pyslurm/db/tres.pyx | 24 ++------ pyslurm/utils/helpers.pyx | 54 ------------------ tests/integration/test_db_job.py | 2 +- tests/integration/test_db_qos.py | 2 +- tests/integration/test_job.py | 4 +- tests/integration/test_job_steps.py | 11 ++-- tests/integration/test_node.py | 2 +- tests/integration/test_partition.py | 2 +- tests/unit/test_collection.py | 20 +++++++ tests/unit/test_common.py | 79 +------------------------- tests/unit/test_db_job.py | 2 +- tests/unit/test_job.py | 2 +- tests/unit/test_job_steps.py | 2 +- tests/unit/test_node.py | 2 +- tests/unit/test_partition.py | 2 +- 57 files changed, 113 insertions(+), 317 deletions(-) diff --git a/docs/reference/collections.md b/docs/reference/collections.md index 70af6f48..650bd353 100644 --- a/docs/reference/collections.md +++ b/docs/reference/collections.md @@ -4,3 +4,13 @@ title: collections ::: pyslurm.collections handler: python + options: + members: + - MultiClusterMap + - BaseView + - KeysView + - MCKeysView + - ItemsView + - MCItemsView + - ValuesView + - ClustersView diff --git a/docs/reference/config.md b/docs/reference/config.md index 
94b0438e..a461aba5 100644 --- a/docs/reference/config.md +++ b/docs/reference/config.md @@ -7,4 +7,3 @@ title: Config removed in the future when a replacement is introduced ::: pyslurm.config - handler: python diff --git a/docs/reference/constants.md b/docs/reference/constants.md index dd659b4c..65301afb 100644 --- a/docs/reference/constants.md +++ b/docs/reference/constants.md @@ -3,5 +3,3 @@ title: constants --- ::: pyslurm.constants - handler: python - diff --git a/docs/reference/db/cluster.md b/docs/reference/db/cluster.md index e6d0a900..219988d5 100644 --- a/docs/reference/db/cluster.md +++ b/docs/reference/db/cluster.md @@ -7,4 +7,3 @@ title: Cluster removed in the future when a replacement is introduced ::: pyslurm.slurmdb_clusters - handler: python diff --git a/docs/reference/db/connection.md b/docs/reference/db/connection.md index 27c904fc..7d77639e 100644 --- a/docs/reference/db/connection.md +++ b/docs/reference/db/connection.md @@ -3,4 +3,3 @@ title: Connection --- ::: pyslurm.db.Connection - handler: python diff --git a/docs/reference/db/event.md b/docs/reference/db/event.md index 020abcac..2816aaae 100644 --- a/docs/reference/db/event.md +++ b/docs/reference/db/event.md @@ -7,4 +7,3 @@ title: Event removed in the future when a replacement is introduced ::: pyslurm.slurmdb_events - handler: python diff --git a/docs/reference/db/job.md b/docs/reference/db/job.md index a2c7fadd..e806cc1f 100644 --- a/docs/reference/db/job.md +++ b/docs/reference/db/job.md @@ -7,7 +7,4 @@ title: Job will be removed in a future release ::: pyslurm.db.Job - handler: python - ::: pyslurm.db.Jobs - handler: python diff --git a/docs/reference/db/jobfilter.md b/docs/reference/db/jobfilter.md index 21aa55d1..523d7c9c 100644 --- a/docs/reference/db/jobfilter.md +++ b/docs/reference/db/jobfilter.md @@ -3,4 +3,3 @@ title: JobFilter --- ::: pyslurm.db.JobFilter - handler: python diff --git a/docs/reference/db/jobstats.md b/docs/reference/db/jobstats.md index 35f31ac6..1bc17d20 
100644 --- a/docs/reference/db/jobstats.md +++ b/docs/reference/db/jobstats.md @@ -3,4 +3,3 @@ title: JobStatistics --- ::: pyslurm.db.JobStatistics - handler: python diff --git a/docs/reference/db/jobstep.md b/docs/reference/db/jobstep.md index 392fab65..a7bdc720 100644 --- a/docs/reference/db/jobstep.md +++ b/docs/reference/db/jobstep.md @@ -3,7 +3,4 @@ title: JobStep --- ::: pyslurm.db.JobStep - handler: python - ::: pyslurm.db.JobSteps - handler: python diff --git a/docs/reference/db/reservation.md b/docs/reference/db/reservation.md index 1a1af0c4..c1f110a3 100644 --- a/docs/reference/db/reservation.md +++ b/docs/reference/db/reservation.md @@ -7,4 +7,3 @@ title: Reservation removed in the future when a replacement is introduced ::: pyslurm.slurmdb_reservations - handler: python diff --git a/docs/reference/exceptions.md b/docs/reference/exceptions.md index 90876435..4abc0047 100644 --- a/docs/reference/exceptions.md +++ b/docs/reference/exceptions.md @@ -3,7 +3,4 @@ title: Exceptions --- ::: pyslurm.PyslurmError - handler: python - ::: pyslurm.RPCError - handler: python diff --git a/docs/reference/frontend.md b/docs/reference/frontend.md index 5247e540..f56a7ecd 100644 --- a/docs/reference/frontend.md +++ b/docs/reference/frontend.md @@ -7,4 +7,3 @@ title: Frontend removed in the future when a replacement is introduced ::: pyslurm.front_end - handler: python diff --git a/docs/reference/hostlist.md b/docs/reference/hostlist.md index dc2d81ee..33f8485d 100644 --- a/docs/reference/hostlist.md +++ b/docs/reference/hostlist.md @@ -7,4 +7,3 @@ title: Hostlist removed in the future when a replacement is introduced ::: pyslurm.hostlist - handler: python diff --git a/docs/reference/job.md b/docs/reference/job.md index 8e3d0c6e..cb1c19eb 100644 --- a/docs/reference/job.md +++ b/docs/reference/job.md @@ -7,7 +7,4 @@ title: Job removed in a future release ::: pyslurm.Job - handler: python - ::: pyslurm.Jobs - handler: python diff --git a/docs/reference/jobstep.md 
b/docs/reference/jobstep.md index 2ce6ef7f..b7b3e2b9 100644 --- a/docs/reference/jobstep.md +++ b/docs/reference/jobstep.md @@ -7,7 +7,4 @@ title: JobStep will be removed in a future release ::: pyslurm.JobStep - handler: python - ::: pyslurm.JobSteps - handler: python diff --git a/docs/reference/jobsubmitdescription.md b/docs/reference/jobsubmitdescription.md index bd31bac9..bf7eb6bd 100644 --- a/docs/reference/jobsubmitdescription.md +++ b/docs/reference/jobsubmitdescription.md @@ -3,4 +3,3 @@ title: JobSubmitDescription --- ::: pyslurm.JobSubmitDescription - handler: python diff --git a/docs/reference/node.md b/docs/reference/node.md index ccb16c54..e8e8d619 100644 --- a/docs/reference/node.md +++ b/docs/reference/node.md @@ -7,7 +7,4 @@ title: Node removed in a future release ::: pyslurm.Node - handler: python - ::: pyslurm.Nodes - handler: python diff --git a/docs/reference/partition.md b/docs/reference/partition.md index b9701f55..9181e10f 100644 --- a/docs/reference/partition.md +++ b/docs/reference/partition.md @@ -7,7 +7,4 @@ title: Partition will be removed in a future release ::: pyslurm.Partition - handler: python - ::: pyslurm.Partitions - handler: python diff --git a/docs/reference/reservation.md b/docs/reference/reservation.md index 563e29db..c5a3d891 100644 --- a/docs/reference/reservation.md +++ b/docs/reference/reservation.md @@ -7,4 +7,3 @@ title: Reservation removed in the future when a replacement is introduced ::: pyslurm.reservation - handler: python diff --git a/docs/reference/statistics.md b/docs/reference/statistics.md index 1f2b2e37..043461f8 100644 --- a/docs/reference/statistics.md +++ b/docs/reference/statistics.md @@ -7,4 +7,3 @@ title: Statistics removed in the future when a replacement is introduced ::: pyslurm.statistics - handler: python diff --git a/docs/reference/topology.md b/docs/reference/topology.md index 1cb107a1..c6b8f9cc 100644 --- a/docs/reference/topology.md +++ b/docs/reference/topology.md @@ -7,4 +7,3 @@ title: 
Topology removed in the future when a replacement is introduced ::: pyslurm.topology - handler: python diff --git a/docs/reference/trigger.md b/docs/reference/trigger.md index 308a3e3f..e6ea1e98 100644 --- a/docs/reference/trigger.md +++ b/docs/reference/trigger.md @@ -7,4 +7,3 @@ title: Trigger removed in the future when a replacement is introduced ::: pyslurm.trigger - handler: python diff --git a/docs/reference/utilities.md b/docs/reference/utilities.md index 63eb7bc0..dbf4a09e 100644 --- a/docs/reference/utilities.md +++ b/docs/reference/utilities.md @@ -3,37 +3,17 @@ title: utils --- ::: pyslurm.utils - handler: python + options: + members: [] ::: pyslurm.utils.timestr_to_secs - handler: python - ::: pyslurm.utils.timestr_to_mins - handler: python - ::: pyslurm.utils.secs_to_timestr - handler: python - ::: pyslurm.utils.mins_to_timestr - handler: python - ::: pyslurm.utils.date_to_timestamp - handler: python - ::: pyslurm.utils.timestamp_to_date - handler: python - ::: pyslurm.utils.expand_range_str - handler: python - ::: pyslurm.utils.humanize - handler: python - ::: pyslurm.utils.dehumanize - handler: python - ::: pyslurm.utils.nodelist_from_range_str - handler: python - ::: pyslurm.utils.nodelist_to_range_str - handler: python diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css index 9562d9be..565642ed 100644 --- a/docs/stylesheets/extra.css +++ b/docs/stylesheets/extra.css @@ -2,3 +2,8 @@ .md-grid { max-width: 75%; } + +/* Indentation. 
*/ +div.doc-contents:not(.first) { + padding-left: 25px; +} diff --git a/mkdocs.yml b/mkdocs.yml index daea3007..9d81f66b 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -56,6 +56,8 @@ plugins: docstring_style: google show_signature: true show_root_heading: true + show_symbol_type_toc: true + show_symbol_type_heading: true markdown_extensions: - admonition diff --git a/pyslurm/collections.pxd b/pyslurm/collections.pxd index eea38e30..24007da7 100644 --- a/pyslurm/collections.pxd +++ b/pyslurm/collections.pxd @@ -44,6 +44,7 @@ cdef class MultiClusterMap: Internally, the data is structured in a `dict` like this (with `pyslurm.Jobs` as an example): + ```python data = { "LOCAL_CLUSTER": 1: pyslurm.Job, @@ -55,6 +56,7 @@ cdef class MultiClusterMap: ... ... } + ``` When a collection inherits from this class, its functionality will basically simulate a standard `dict` - with a few extensions to enable @@ -64,13 +66,17 @@ cdef class MultiClusterMap: For example, with the data from above: - >>> job = data[1] + ```python + job = data[1] + ``` `job` would then hold the instance for Job 1 from the `LOCAL_CLUSTER` data. Alternatively, data can also be accessed like this: - >>> job = data["OTHER_REMOTE_CLUSTER"][100] + ```python + job = data["OTHER_REMOTE_CLUSTER"][100] + ``` Here, you are directly specifying which Cluster data you want to access. diff --git a/pyslurm/collections.pyx b/pyslurm/collections.pyx index 5630ff2b..47ee91c3 100644 --- a/pyslurm/collections.pyx +++ b/pyslurm/collections.pyx @@ -25,8 +25,7 @@ from pyslurm.db.cluster import LOCAL_CLUSTER import json -import typing -from typing import Union +from typing import Union, Any class BaseView: @@ -359,7 +358,7 @@ cdef class MultiClusterMap: type will raise a TypeError. Args: - item (typing.Any): + item (Any): Item to add to the collection. 
Raises: @@ -382,7 +381,7 @@ cdef class MultiClusterMap: self.data[item.cluster][self._item_id(item)] = item def to_json(self, multi_cluster=False): - """Convert all the whole collection to JSON. + """Convert the collection to JSON. Returns: (str): JSON formatted string from `json.dumps()` @@ -555,7 +554,8 @@ def multi_reload(cur, frozen=True): def dict_recursive(collection): cdef dict out = {} for item_id, item in collection.items(): - out[item_id] = item.as_dict() + if hasattr(item, "to_dict"): + out[item_id] = item.to_dict() return out diff --git a/pyslurm/core/job/job.pxd b/pyslurm/core/job/job.pxd index 29da1ee8..3173ae91 100644 --- a/pyslurm/core/job/job.pxd +++ b/pyslurm/core/job/job.pxd @@ -66,7 +66,7 @@ from pyslurm.slurm cimport ( cdef class Jobs(MultiClusterMap): - """A collection of [pyslurm.Job][] objects. + """A [`Multi Cluster`][pyslurm.collections.MultiClusterMap] collection of [pyslurm.Job][] objects. Args: jobs (Union[list, dict], optional=None): diff --git a/pyslurm/core/job/job.pyx b/pyslurm/core/job/job.pyx index f2661d57..8d70582d 100644 --- a/pyslurm/core/job/job.pyx +++ b/pyslurm/core/job/job.pyx @@ -278,6 +278,9 @@ cdef class Job: src.ptr = tmp def as_dict(self): + return self.to_dict() + + def to_dict(self): """Job information formatted as a dictionary. Returns: diff --git a/pyslurm/core/job/step.pyx b/pyslurm/core/job/step.pyx index 81b84a0d..9cfbd4ea 100644 --- a/pyslurm/core/job/step.pyx +++ b/pyslurm/core/job/step.pyx @@ -59,20 +59,6 @@ cdef class JobSteps(dict): elif steps is not None: raise TypeError("Invalid Type: {type(steps)}") - def as_dict(self, recursive=False): - """Convert the collection data to a dict. - - Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. If - this is set to `True`, then additionally all objects are - converted to dicts. - - Returns: - (dict): Collection as a dict. 
- """ - return self if not recursive else collections.dict_recursive(self) - @staticmethod def load(job): """Load the Job Steps from the system. @@ -326,6 +312,9 @@ cdef class JobStep: verify_rpc(slurm_update_step(js.umsg)) def as_dict(self): + return self.to_dict() + + def to_dict(self): """JobStep information formatted as a dictionary. Returns: diff --git a/pyslurm/core/node.pxd b/pyslurm/core/node.pxd index d769b614..3e2f51ee 100644 --- a/pyslurm/core/node.pxd +++ b/pyslurm/core/node.pxd @@ -59,7 +59,7 @@ from pyslurm.collections cimport MultiClusterMap cdef class Nodes(MultiClusterMap): - """A collection of [pyslurm.Node][] objects. + """A [`Multi Cluster`][pyslurm.collections.MultiClusterMap] collection of [pyslurm.Node][] objects. Args: nodes (Union[list, dict, str], optional=None): diff --git a/pyslurm/core/node.pyx b/pyslurm/core/node.pyx index 24c8fcee..8b4328d4 100644 --- a/pyslurm/core/node.pyx +++ b/pyslurm/core/node.pyx @@ -38,8 +38,6 @@ from pyslurm.utils.helpers import ( _getpwall_to_dict, cpubind_to_num, instance_to_dict, - collection_to_dict, - group_collection_by_cluster, nodelist_from_range_str, nodelist_to_range_str, ) @@ -389,6 +387,9 @@ cdef class Node: verify_rpc(slurm_delete_node(self.umsg)) def as_dict(self): + return self.to_dict() + + def to_dict(self): """Node information formatted as a dictionary. Returns: @@ -397,7 +398,7 @@ cdef class Node: Examples: >>> import pyslurm >>> mynode = pyslurm.Node.load("mynode") - >>> mynode_dict = mynode.as_dict() + >>> mynode_dict = mynode.to_dict() """ return instance_to_dict(self) diff --git a/pyslurm/core/partition.pxd b/pyslurm/core/partition.pxd index 147a589a..7fc22ee3 100644 --- a/pyslurm/core/partition.pxd +++ b/pyslurm/core/partition.pxd @@ -60,7 +60,7 @@ from pyslurm.collections cimport MultiClusterMap cdef class Partitions(MultiClusterMap): - """A collection of [pyslurm.Partition][] objects. 
+ """A [`Multi Cluster`][pyslurm.collections.MultiClusterMap] collection of [pyslurm.Partition][] objects. Args: partitions (Union[list[str], dict[str, Partition], str], optional=None): diff --git a/pyslurm/core/partition.pyx b/pyslurm/core/partition.pyx index 18a6d132..35eeaa44 100644 --- a/pyslurm/core/partition.pyx +++ b/pyslurm/core/partition.pyx @@ -203,6 +203,9 @@ cdef class Partition: return self.name def as_dict(self): + return self.to_dict() + + def to_dict(self): """Partition information formatted as a dictionary. Returns: @@ -211,7 +214,7 @@ cdef class Partition: Examples: >>> import pyslurm >>> mypart = pyslurm.Partition.load("mypart") - >>> mypart_dict = mypart.as_dict() + >>> mypart_dict = mypart.to_dict() """ return instance_to_dict(self) diff --git a/pyslurm/db/assoc.pyx b/pyslurm/db/assoc.pyx index d9426944..116666b3 100644 --- a/pyslurm/db/assoc.pyx +++ b/pyslurm/db/assoc.pyx @@ -25,14 +25,12 @@ from pyslurm.core.error import RPCError from pyslurm.utils.helpers import ( instance_to_dict, - collection_to_dict, - group_collection_by_cluster, user_to_uid, ) from pyslurm.utils.uint import * from pyslurm.db.connection import _open_conn_or_error from pyslurm.db.cluster import LOCAL_CLUSTER -from pyslurm import collections +import pyslurm.collections as collections cdef class Associations(MultiClusterMap): @@ -44,20 +42,6 @@ cdef class Associations(MultiClusterMap): id_attr=Association.id, key_type=int) - def as_dict(self, recursive=False): - """Convert the collection data to a dict. - - Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. If - this is set to `True`, then additionally all objects are - converted to dicts. - - Returns: - (dict): Collection as a dict. 
- """ - return super().as_dict(recursive) - @staticmethod def load(AssociationFilter db_filter=None, Connection db_connection=None): cdef: @@ -230,7 +214,7 @@ cdef class Association: wrap.ptr = in_ptr return wrap - def as_dict(self): + def to_dict(self): """Database Association information formatted as a dictionary. Returns: diff --git a/pyslurm/db/job.pxd b/pyslurm/db/job.pxd index e862703d..4576a285 100644 --- a/pyslurm/db/job.pxd +++ b/pyslurm/db/job.pxd @@ -152,7 +152,7 @@ cdef class JobFilter: cdef class Jobs(MultiClusterMap): - """A collection of [pyslurm.db.Job][] objects.""" + """A [`Multi Cluster`][pyslurm.collections.MultiClusterMap] collection of [pyslurm.db.Job][] objects.""" pass diff --git a/pyslurm/db/job.pyx b/pyslurm/db/job.pyx index 3b546010..3b31c302 100644 --- a/pyslurm/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -28,7 +28,7 @@ from pyslurm.core import slurmctld from typing import Any from pyslurm.utils.uint import * from pyslurm.db.cluster import LOCAL_CLUSTER -from pyslurm import collections +import pyslurm.collections as collections from pyslurm.utils.ctime import ( date_to_timestamp, timestr_to_mins, @@ -41,8 +41,6 @@ from pyslurm.utils.helpers import ( uid_to_name, nodelist_to_range_str, instance_to_dict, - collection_to_dict, - group_collection_by_cluster, _get_exit_code, ) from pyslurm.db.connection import _open_conn_or_error @@ -199,20 +197,6 @@ cdef class Jobs(MultiClusterMap): id_attr=Job.id, key_type=int) - def as_dict(self, recursive=False): - """Convert the collection data to a dict. - - Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. If - this is set to `True`, then additionally all objects are - converted to dicts. - - Returns: - (dict): Collection as a dict. 
- """ - return super().as_dict(recursive) - @staticmethod def load(JobFilter db_filter=None, Connection db_connection=None): """Load Jobs from the Slurm Database @@ -510,7 +494,10 @@ cdef class Job: self.steps[step.id] = step def as_dict(self): - """Database Job information formatted as a dictionary. + return self.to_dict() + + def to_dict(self): + """Convert Database Job information to a dictionary. Returns: (dict): Database Job information as dict @@ -518,17 +505,17 @@ cdef class Job: Examples: >>> import pyslurm >>> myjob = pyslurm.db.Job.load(10000) - >>> myjob_dict = myjob.as_dict() + >>> myjob_dict = myjob.to_dict() """ cdef dict out = instance_to_dict(self) if self.stats: - out["stats"] = self.stats.as_dict() + out["stats"] = self.stats.to_dict() steps = out.pop("steps", {}) out["steps"] = {} for step_id, step in steps.items(): - out["steps"][step_id] = step.as_dict() + out["steps"][step_id] = step.to_dict() return out diff --git a/pyslurm/db/qos.pyx b/pyslurm/db/qos.pyx index 16db21a9..299c0ed9 100644 --- a/pyslurm/db/qos.pyx +++ b/pyslurm/db/qos.pyx @@ -23,9 +23,8 @@ # cython: language_level=3 from pyslurm.core.error import RPCError -from pyslurm.utils.helpers import instance_to_dict, collection_to_dict_global +from pyslurm.utils.helpers import instance_to_dict from pyslurm.db.connection import _open_conn_or_error -from pyslurm import collections cdef class QualitiesOfService(dict): @@ -33,21 +32,6 @@ cdef class QualitiesOfService(dict): def __init__(self): pass - def as_dict(self, recursive=False): - """Convert the collection data to a dict. - - Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. If - this is set to `True`, then additionally all objects are - converted to dicts. - - Returns: - (dict): Collection as a dict. 
- """ - return self if not recursive else collections.dict_recursive(self) - - @staticmethod def load(QualityOfServiceFilter db_filter=None, Connection db_connection=None, name_is_key=True): @@ -175,7 +159,7 @@ cdef class QualityOfService: def __repr__(self): return f'{self.__class__.__name__}({self.name})' - def as_dict(self): + def to_dict(self): """Database QualityOfService information formatted as a dictionary. Returns: diff --git a/pyslurm/db/stats.pyx b/pyslurm/db/stats.pyx index 3ae0c8b5..7bbb2a8a 100644 --- a/pyslurm/db/stats.pyx +++ b/pyslurm/db/stats.pyx @@ -47,7 +47,7 @@ cdef class JobStatistics: self.min_cpu_time_node = None self.min_cpu_time_task = None - def as_dict(self): + def to_dict(self): return instance_to_dict(self) @staticmethod diff --git a/pyslurm/db/step.pyx b/pyslurm/db/step.pyx index fa4ab8bb..e39af066 100644 --- a/pyslurm/db/step.pyx +++ b/pyslurm/db/step.pyx @@ -57,9 +57,14 @@ cdef class JobStep: wrap.stats = JobStatistics.from_step(wrap) return wrap - def as_dict(self): + def to_dict(self): + """Convert Database JobStep information to a dictionary. 
+ + Returns: + (dict): Database JobStep information as dict + """ cdef dict out = instance_to_dict(self) - out["stats"] = self.stats.as_dict() + out["stats"] = self.stats.to_dict() return out @property diff --git a/pyslurm/db/tres.pxd b/pyslurm/db/tres.pxd index ef1568f6..23b44ad2 100644 --- a/pyslurm/db/tres.pxd +++ b/pyslurm/db/tres.pxd @@ -69,7 +69,7 @@ cdef class TrackableResourceFilter: cdef slurmdb_tres_cond_t *ptr -cdef class TrackableResources(list): +cdef class TrackableResources(dict): cdef public raw_str @staticmethod diff --git a/pyslurm/db/tres.pyx b/pyslurm/db/tres.pyx index cea5904e..78195654 100644 --- a/pyslurm/db/tres.pyx +++ b/pyslurm/db/tres.pyx @@ -25,7 +25,7 @@ from pyslurm.utils.uint import * from pyslurm.constants import UNLIMITED from pyslurm.core.error import RPCError -from pyslurm.utils.helpers import instance_to_dict, collection_to_dict_global +from pyslurm.utils.helpers import instance_to_dict from pyslurm.utils import cstr from pyslurm.db.connection import _open_conn_or_error import json @@ -76,7 +76,7 @@ cdef class TrackableResourceLimits: return out def _validate(self, TrackableResources tres_data): - id_dict = _tres_names_to_ids(self.as_dict(flatten_limits=True), + id_dict = _tres_names_to_ids(self.to_dict(flatten_limits=True), tres_data) return id_dict @@ -91,7 +91,7 @@ cdef class TrackableResourceLimits: return out - def as_dict(self, flatten_limits=False): + def to_dict(self, flatten_limits=False): cdef dict inst_dict = instance_to_dict(self) if flatten_limits: @@ -134,25 +134,11 @@ cdef class TrackableResourceFilter: self._alloc() -cdef class TrackableResources(list): +cdef class TrackableResources(dict): def __init__(self): pass - def as_dict(self, recursive=False): - """Convert the collection data to a dict. - - Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. If - this is set to `True`, then additionally all objects are - converted to dicts. 
- - Returns: - (dict): Collection as a dict. - """ - return self if not recursive else collections.dict_recursive(self) - @staticmethod def load(Connection db_connection=None, name_is_key=True): """Load Trackable Resources from the Database. @@ -246,7 +232,7 @@ cdef class TrackableResource: wrap.ptr = in_ptr return wrap - def as_dict(self): + def to_dict(self): return instance_to_dict(self) @property diff --git a/pyslurm/utils/helpers.pyx b/pyslurm/utils/helpers.pyx index fb1d2201..9fcd5896 100644 --- a/pyslurm/utils/helpers.pyx +++ b/pyslurm/utils/helpers.pyx @@ -341,60 +341,6 @@ def instance_to_dict(inst): return out -def collection_to_dict(collection, identifier, recursive=False, group_id=None): - cdef dict out = {} - - for item in collection: - cluster = item.cluster - if cluster not in out: - out[cluster] = {} - - _id = identifier.__get__(item) - data = item if not recursive else item.as_dict() - - if group_id: - grp_id = group_id.__get__(item) - if grp_id not in out[cluster]: - out[cluster][grp_id] = {} - out[cluster][grp_id].update({_id: data}) - else: - out[cluster][_id] = data - - return out - - -def collection_to_dict_global(collection, identifier, recursive=False): - cdef dict out = {} - for item in collection: - _id = identifier.__get__(item) - out[_id] = item if not recursive else item.as_dict() - return out - - -def group_collection_by_cluster(collection): - cdef dict out = {} - collection_type = type(collection) - - for item in collection: - cluster = item.cluster - if cluster not in out: - out[cluster] = collection_type() - - out[cluster].append(item) - - return out - - -def _sum_prop(obj, name, startval=0): - val = startval - for n in obj.values(): - v = name.__get__(n) - if v is not None: - val += v - - return val - - def _get_exit_code(exit_code): exit_state=sig = 0 if exit_code != slurm.NO_VAL: diff --git a/tests/integration/test_db_job.py b/tests/integration/test_db_job.py index 571ec0d2..1ea59690 100644 --- a/tests/integration/test_db_job.py 
+++ b/tests/integration/test_db_job.py @@ -49,7 +49,7 @@ def test_parse_all(submit_job): job = submit_job() util.wait() db_job = pyslurm.db.Job.load(job.id) - job_dict = db_job.as_dict() + job_dict = db_job.to_dict() assert job_dict["stats"] assert job_dict["steps"] diff --git a/tests/integration/test_db_qos.py b/tests/integration/test_db_qos.py index 11d9e870..e1cde024 100644 --- a/tests/integration/test_db_qos.py +++ b/tests/integration/test_db_qos.py @@ -38,7 +38,7 @@ def test_load_single(): def test_parse_all(submit_job): qos = pyslurm.db.QualityOfService.load("normal") - qos_dict = qos.as_dict() + qos_dict = qos.to_dict() assert qos_dict assert qos_dict["name"] == qos.name diff --git a/tests/integration/test_job.py b/tests/integration/test_job.py index 15c4bdef..9788af45 100644 --- a/tests/integration/test_job.py +++ b/tests/integration/test_job.py @@ -35,9 +35,7 @@ def test_parse_all(submit_job): job = submit_job() - # Use the as_dict() function to test if parsing works for all - # properties on a simple Job without error. 
- Job.load(job.id).as_dict() + Job.load(job.id).to_dict() def test_load(submit_job): diff --git a/tests/integration/test_job_steps.py b/tests/integration/test_job_steps.py index b24409f5..8d13ba9f 100644 --- a/tests/integration/test_job_steps.py +++ b/tests/integration/test_job_steps.py @@ -102,7 +102,7 @@ def test_collection(submit_job): job = submit_job(script=create_job_script_multi_step()) time.sleep(util.WAIT_SECS_SLURMCTLD) - steps = JobSteps.load(job).as_dict() + steps = JobSteps.load(job) assert steps # We have 3 Steps: batch, 0 and 1 @@ -116,7 +116,7 @@ def test_cancel(submit_job): job = submit_job(script=create_job_script_multi_step()) time.sleep(util.WAIT_SECS_SLURMCTLD) - steps = JobSteps.load(job).as_dict() + steps = JobSteps.load(job) assert len(steps) == 3 assert ("batch" in steps and 0 in steps and @@ -125,7 +125,7 @@ def test_cancel(submit_job): steps[0].cancel() time.sleep(util.WAIT_SECS_SLURMCTLD) - steps = JobSteps.load(job).as_dict() + steps = JobSteps.load(job) assert len(steps) == 2 assert ("batch" in steps and 1 in steps) @@ -173,8 +173,5 @@ def test_load_with_wrong_step_id(submit_job): def test_parse_all(submit_job): job = submit_job() - - # Use the as_dict() function to test if parsing works for all - # properties on a simple JobStep without error. 
time.sleep(util.WAIT_SECS_SLURMCTLD) - JobStep.load(job, "batch").as_dict() + JobStep.load(job, "batch").to_dict() diff --git a/tests/integration/test_node.py b/tests/integration/test_node.py index f0ecf9b1..b79c38d4 100644 --- a/tests/integration/test_node.py +++ b/tests/integration/test_node.py @@ -69,4 +69,4 @@ def test_modify(): def test_parse_all(): - Nodes.load().popitem().as_dict() + Nodes.load().popitem().to_dict() diff --git a/tests/integration/test_partition.py b/tests/integration/test_partition.py index f5697925..a748c58b 100644 --- a/tests/integration/test_partition.py +++ b/tests/integration/test_partition.py @@ -68,7 +68,7 @@ def test_modify(): def test_parse_all(): - Partitions.load().popitem().as_dict() + Partitions.load().popitem().to_dict() def test_reload(): diff --git a/tests/unit/test_collection.py b/tests/unit/test_collection.py index 83bc19c3..399088c7 100644 --- a/tests/unit/test_collection.py +++ b/tests/unit/test_collection.py @@ -22,6 +22,8 @@ import pytest import pyslurm +import pyslurm.collections +from pyslurm.collections import sum_property LOCAL_CLUSTER = pyslurm.db.cluster.LOCAL_CLUSTER OTHER_CLUSTER = "other_cluster" @@ -306,3 +308,21 @@ def test_cluster_view(self): assert len(col.clusters()) == 2 for c in col.clusters(): assert c + + def test_sum_property(self): + class TestObject: + @property + def memory(self): + return 10240 + + @property + def cpus(self): + return None + + object_dict = {i: TestObject() for i in range(10)} + + expected = 10240 * 10 + assert sum_property(object_dict, TestObject.memory) == expected + + expected = 0 + assert sum_property(object_dict, TestObject.cpus) == expected diff --git a/tests/unit/test_common.py b/tests/unit/test_common.py index 1598d191..c9a52117 100644 --- a/tests/unit/test_common.py +++ b/tests/unit/test_common.py @@ -55,12 +55,11 @@ nodelist_from_range_str, nodelist_to_range_str, instance_to_dict, - collection_to_dict, - collection_to_dict_global, - group_collection_by_cluster, - 
_sum_prop, ) from pyslurm.utils import cstr +from pyslurm.collections import ( + sum_property, +) class TestStrings: @@ -414,75 +413,3 @@ def test_nodelist_to_range_str(self): assert "node[001,007-009]" == nodelist_to_range_str(nodelist) assert "node[001,007-009]" == nodelist_to_range_str(nodelist_str) - def test_summarize_property(self): - class TestObject: - @property - def memory(self): - return 10240 - - @property - def cpus(self): - return None - - object_dict = {i: TestObject() for i in range(10)} - - expected = 10240 * 10 - assert _sum_prop(object_dict, TestObject.memory) == expected - - expected = 0 - assert _sum_prop(object_dict, TestObject.cpus) == expected - - def test_collection_to_dict(self): - class TestObject: - - def __init__(self, _id, _grp_id, cluster): - self._id = _id - self._grp_id = _grp_id - self.cluster = cluster - - @property - def id(self): - return self._id - - @property - def group_id(self): - return self._grp_id - - def as_dict(self): - return instance_to_dict(self) - - class TestCollection(list): - - def __init__(self, data): - super().__init__() - self.extend(data) - - OFFSET = 100 - RANGE = 10 - - data = [TestObject(x, x+OFFSET, "TestCluster") for x in range(RANGE)] - collection = TestCollection(data) - - coldict = collection_to_dict(collection, identifier=TestObject.id) - coldict = coldict.get("TestCluster", {}) - - assert len(coldict) == RANGE - for i in range(RANGE): - assert i in coldict - assert isinstance(coldict[i], TestObject) - - coldict = collection_to_dict(collection, identifier=TestObject.id, - group_id=TestObject.group_id) - coldict = coldict.get("TestCluster", {}) - - assert len(coldict) == RANGE - for i in range(RANGE): - assert i+OFFSET in coldict - assert i in coldict[i+OFFSET] - - coldict = collection_to_dict(collection, identifier=TestObject.id, - recursive=True) - coldict = coldict.get("TestCluster", {}) - - for item in coldict.values(): - assert isinstance(item, dict) diff --git a/tests/unit/test_db_job.py 
b/tests/unit/test_db_job.py index 61a0a573..c2ae8bb0 100644 --- a/tests/unit/test_db_job.py +++ b/tests/unit/test_db_job.py @@ -49,4 +49,4 @@ def test_create_instance(): def test_parse_all(): job = pyslurm.db.Job(9999) - assert job.as_dict() + assert job.to_dict() diff --git a/tests/unit/test_job.py b/tests/unit/test_job.py index 6d5d561a..863fcfab 100644 --- a/tests/unit/test_job.py +++ b/tests/unit/test_job.py @@ -31,7 +31,7 @@ def test_create_instance(): def test_parse_all(): - assert Job(9999).as_dict() + assert Job(9999).to_dict() def test_parse_dependencies_to_dict(): diff --git a/tests/unit/test_job_steps.py b/tests/unit/test_job_steps.py index d7fc5c8d..c8c52352 100644 --- a/tests/unit/test_job_steps.py +++ b/tests/unit/test_job_steps.py @@ -39,4 +39,4 @@ def test_create_instance(): def test_parse_all(): - assert JobStep(9999, 1).as_dict() + assert JobStep(9999, 1).to_dict() diff --git a/tests/unit/test_node.py b/tests/unit/test_node.py index e31ffea4..c4dba73e 100644 --- a/tests/unit/test_node.py +++ b/tests/unit/test_node.py @@ -32,7 +32,7 @@ def test_create_instance(): def test_parse_all(): - assert Node("localhost").as_dict() + assert Node("localhost").to_dict() def test_set_node_state(): diff --git a/tests/unit/test_partition.py b/tests/unit/test_partition.py index 4f0624e7..b699893c 100644 --- a/tests/unit/test_partition.py +++ b/tests/unit/test_partition.py @@ -32,7 +32,7 @@ def test_create_instance(): def test_parse_all(): - assert Partition("normal").as_dict() + assert Partition("normal").to_dict() def test_parse_memory(): From 91c72b32eefd68d581321e249489eda6e48896e0 Mon Sep 17 00:00:00 2001 From: tazend Date: Thu, 13 Jul 2023 20:28:20 +0200 Subject: [PATCH 23/28] wip --- .../{collections.md => xcollections.md} | 4 ++-- pyslurm/core/job/job.pxd | 4 ++-- pyslurm/core/job/job.pyx | 12 +++++----- pyslurm/core/job/step.pyx | 2 +- pyslurm/core/node.pxd | 4 ++-- pyslurm/core/node.pyx | 22 +++++++++---------- pyslurm/core/partition.pxd | 4 ++-- 
pyslurm/core/partition.pyx | 8 +++---- pyslurm/db/assoc.pxd | 2 +- pyslurm/db/assoc.pyx | 2 +- pyslurm/db/job.pxd | 4 ++-- pyslurm/db/job.pyx | 4 ++-- pyslurm/{collections.pxd => xcollections.pxd} | 0 pyslurm/{collections.pyx => xcollections.pyx} | 0 tests/unit/test_collection.py | 4 ++-- tests/unit/test_common.py | 2 +- 16 files changed, 39 insertions(+), 39 deletions(-) rename docs/reference/{collections.md => xcollections.md} (85%) rename pyslurm/{collections.pxd => xcollections.pxd} (100%) rename pyslurm/{collections.pyx => xcollections.pyx} (100%) diff --git a/docs/reference/collections.md b/docs/reference/xcollections.md similarity index 85% rename from docs/reference/collections.md rename to docs/reference/xcollections.md index 650bd353..fd57ec09 100644 --- a/docs/reference/collections.md +++ b/docs/reference/xcollections.md @@ -1,8 +1,8 @@ --- -title: collections +title: xcollections --- -::: pyslurm.collections +::: pyslurm.xcollections handler: python options: members: diff --git a/pyslurm/core/job/job.pxd b/pyslurm/core/job/job.pxd index 3173ae91..81442413 100644 --- a/pyslurm/core/job/job.pxd +++ b/pyslurm/core/job/job.pxd @@ -30,7 +30,7 @@ from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t, int64_t from libc.stdlib cimport free from pyslurm.core.job.submission cimport JobSubmitDescription from pyslurm.core.job.step cimport JobSteps, JobStep -from pyslurm.collections cimport MultiClusterMap +from pyslurm.xcollections cimport MultiClusterMap from pyslurm cimport slurm from pyslurm.slurm cimport ( working_cluster_rec, @@ -66,7 +66,7 @@ from pyslurm.slurm cimport ( cdef class Jobs(MultiClusterMap): - """A [`Multi Cluster`][pyslurm.collections.MultiClusterMap] collection of [pyslurm.Job][] objects. + """A [`Multi Cluster`][pyslurm.xcollections.MultiClusterMap] collection of [pyslurm.Job][] objects. 
Args: jobs (Union[list, dict], optional=None): diff --git a/pyslurm/core/job/job.pyx b/pyslurm/core/job/job.pyx index 8d70582d..31da0c7e 100644 --- a/pyslurm/core/job/job.pyx +++ b/pyslurm/core/job/job.pyx @@ -35,7 +35,7 @@ from pyslurm.utils import cstr, ctime from pyslurm.utils.uint import * from pyslurm.core.job.util import * from pyslurm.db.cluster import LOCAL_CLUSTER -from pyslurm import collections +from pyslurm import xcollections from pyslurm.core.error import ( RPCError, verify_rpc, @@ -137,7 +137,7 @@ cdef class Jobs(MultiClusterMap): Raises: RPCError: When getting the Jobs from the slurmctld failed. """ - return collections.multi_reload(self, frozen=self.frozen) + return xcollections.multi_reload(self, frozen=self.frozen) def load_steps(self): """Load all Job steps for this collection of Jobs. @@ -161,19 +161,19 @@ cdef class Jobs(MultiClusterMap): @property def memory(self): - return collections.sum_property(self, Job.memory) + return xcollections.sum_property(self, Job.memory) @property def cpus(self): - return collections.sum_property(self, Job.cpus) + return xcollections.sum_property(self, Job.cpus) @property def ntasks(self): - return collections.sum_property(self, Job.ntasks) + return xcollections.sum_property(self, Job.ntasks) @property def cpu_time(self): - return collections.sum_property(self, Job.cpu_time) + return xcollections.sum_property(self, Job.cpu_time) cdef class Job: diff --git a/pyslurm/core/job/step.pyx b/pyslurm/core/job/step.pyx index 9cfbd4ea..26927d9c 100644 --- a/pyslurm/core/job/step.pyx +++ b/pyslurm/core/job/step.pyx @@ -27,7 +27,7 @@ from pyslurm.utils import cstr, ctime from pyslurm.utils.uint import * from pyslurm.core.error import RPCError, verify_rpc from pyslurm.db.cluster import LOCAL_CLUSTER -from pyslurm import collections +from pyslurm import xcollections from pyslurm.utils.helpers import ( signal_to_num, instance_to_dict, diff --git a/pyslurm/core/node.pxd b/pyslurm/core/node.pxd index 3e2f51ee..60d9928f 100644 
--- a/pyslurm/core/node.pxd +++ b/pyslurm/core/node.pxd @@ -55,11 +55,11 @@ from pyslurm.utils cimport cstr from pyslurm.utils cimport ctime from pyslurm.utils.ctime cimport time_t from pyslurm.utils.uint cimport * -from pyslurm.collections cimport MultiClusterMap +from pyslurm.xcollections cimport MultiClusterMap cdef class Nodes(MultiClusterMap): - """A [`Multi Cluster`][pyslurm.collections.MultiClusterMap] collection of [pyslurm.Node][] objects. + """A [`Multi Cluster`][pyslurm.xcollections.MultiClusterMap] collection of [pyslurm.Node][] objects. Args: nodes (Union[list, dict, str], optional=None): diff --git a/pyslurm/core/node.pyx b/pyslurm/core/node.pyx index 8b4328d4..11638ff3 100644 --- a/pyslurm/core/node.pyx +++ b/pyslurm/core/node.pyx @@ -29,7 +29,7 @@ from pyslurm.utils.uint import * from pyslurm.core.error import RPCError, verify_rpc from pyslurm.utils.ctime import timestamp_to_date, _raw_time from pyslurm.db.cluster import LOCAL_CLUSTER -from pyslurm import collections +from pyslurm import xcollections from pyslurm.utils.helpers import ( uid_to_name, gid_to_name, @@ -132,7 +132,7 @@ cdef class Nodes(MultiClusterMap): Raises: RPCError: When getting the Nodes from the slurmctld failed. """ - return collections.multi_reload(self) + return xcollections.multi_reload(self) def modify(self, Node changes): """Modify all Nodes in a collection. 
@@ -163,39 +163,39 @@ cdef class Nodes(MultiClusterMap): @property def free_memory(self): - return collections.sum_property(self, Node.free_memory) + return xcollections.sum_property(self, Node.free_memory) @property def real_memory(self): - return collections.sum_property(self, Node.real_memory) + return xcollections.sum_property(self, Node.real_memory) @property def allocated_memory(self): - return collections.sum_property(self, Node.allocated_memory) + return xcollections.sum_property(self, Node.allocated_memory) @property def total_cpus(self): - return collections.sum_property(self, Node.total_cpus) + return xcollections.sum_property(self, Node.total_cpus) @property def idle_cpus(self): - return collections.sum_property(self, Node.idle_cpus) + return xcollections.sum_property(self, Node.idle_cpus) @property def allocated_cpus(self): - return collections.sum_property(self, Node.allocated_cpus) + return xcollections.sum_property(self, Node.allocated_cpus) @property def effective_cpus(self): - return collections.sum_property(self, Node.effective_cpus) + return xcollections.sum_property(self, Node.effective_cpus) @property def current_watts(self): - return collections.sum_property(self, Node.current_watts) + return xcollections.sum_property(self, Node.current_watts) @property def avg_watts(self): - return collections.sum_property(self, Node.avg_watts) + return xcollections.sum_property(self, Node.avg_watts) cdef class Node: diff --git a/pyslurm/core/partition.pxd b/pyslurm/core/partition.pxd index 7fc22ee3..a5a638df 100644 --- a/pyslurm/core/partition.pxd +++ b/pyslurm/core/partition.pxd @@ -56,11 +56,11 @@ from pyslurm.utils cimport ctime from pyslurm.utils.ctime cimport time_t from pyslurm.utils.uint cimport * from pyslurm.core cimport slurmctld -from pyslurm.collections cimport MultiClusterMap +from pyslurm.xcollections cimport MultiClusterMap cdef class Partitions(MultiClusterMap): - """A [`Multi Cluster`][pyslurm.collections.MultiClusterMap] collection of 
[pyslurm.Partition][] objects. + """A [`Multi Cluster`][pyslurm.xcollections.MultiClusterMap] collection of [pyslurm.Partition][] objects. Args: partitions (Union[list[str], dict[str, Partition], str], optional=None): diff --git a/pyslurm/core/partition.pyx b/pyslurm/core/partition.pyx index 35eeaa44..f579afa3 100644 --- a/pyslurm/core/partition.pyx +++ b/pyslurm/core/partition.pyx @@ -31,7 +31,7 @@ from pyslurm.core.error import RPCError, verify_rpc from pyslurm.utils.ctime import timestamp_to_date, _raw_time from pyslurm.constants import UNLIMITED from pyslurm.db.cluster import LOCAL_CLUSTER -from pyslurm import collections +from pyslurm import xcollections from pyslurm.utils.helpers import ( uid_to_name, gid_to_name, @@ -123,7 +123,7 @@ cdef class Partitions(MultiClusterMap): Raises: RPCError: When getting the Partitions from the slurmctld failed. """ - return collections.multi_reload(self) + return xcollections.multi_reload(self) def modify(self, changes): """Modify all Partitions in a Collection. 
@@ -151,11 +151,11 @@ cdef class Partitions(MultiClusterMap): @property def total_cpus(self): - return collections.sum_property(self, Partition.total_cpus) + return xcollections.sum_property(self, Partition.total_cpus) @property def total_nodes(self): - return collections.sum_property(self, Partition.total_nodes) + return xcollections.sum_property(self, Partition.total_nodes) cdef class Partition: diff --git a/pyslurm/db/assoc.pxd b/pyslurm/db/assoc.pxd index 912f0d6e..384dbb0a 100644 --- a/pyslurm/db/assoc.pxd +++ b/pyslurm/db/assoc.pxd @@ -49,7 +49,7 @@ from pyslurm.db.connection cimport Connection from pyslurm.utils cimport cstr from pyslurm.utils.uint cimport * from pyslurm.db.qos cimport QualitiesOfService, _set_qos_list -from pyslurm.collections cimport MultiClusterMap +from pyslurm.xcollections cimport MultiClusterMap cdef _parse_assoc_ptr(Association ass) cdef _create_assoc_ptr(Association ass, conn=*) diff --git a/pyslurm/db/assoc.pyx b/pyslurm/db/assoc.pyx index 116666b3..87b00385 100644 --- a/pyslurm/db/assoc.pyx +++ b/pyslurm/db/assoc.pyx @@ -30,7 +30,7 @@ from pyslurm.utils.helpers import ( from pyslurm.utils.uint import * from pyslurm.db.connection import _open_conn_or_error from pyslurm.db.cluster import LOCAL_CLUSTER -import pyslurm.collections as collections +import pyslurm.xcollections as xcollections cdef class Associations(MultiClusterMap): diff --git a/pyslurm/db/job.pxd b/pyslurm/db/job.pxd index 4576a285..bf21c003 100644 --- a/pyslurm/db/job.pxd +++ b/pyslurm/db/job.pxd @@ -53,7 +53,7 @@ from pyslurm.db.connection cimport Connection from pyslurm.utils cimport cstr from pyslurm.db.qos cimport QualitiesOfService from pyslurm.db.tres cimport TrackableResources, TrackableResource -from pyslurm.collections cimport MultiClusterMap +from pyslurm.xcollections cimport MultiClusterMap cdef class JobFilter: @@ -152,7 +152,7 @@ cdef class JobFilter: cdef class Jobs(MultiClusterMap): - """A [`Multi Cluster`][pyslurm.collections.MultiClusterMap] collection 
of [pyslurm.db.Job][] objects.""" + """A [`Multi Cluster`][pyslurm.xcollections.MultiClusterMap] collection of [pyslurm.db.Job][] objects.""" pass diff --git a/pyslurm/db/job.pyx b/pyslurm/db/job.pyx index 3b31c302..213111b7 100644 --- a/pyslurm/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -28,7 +28,7 @@ from pyslurm.core import slurmctld from typing import Any from pyslurm.utils.uint import * from pyslurm.db.cluster import LOCAL_CLUSTER -import pyslurm.collections as collections +import pyslurm.xcollections as xcollections from pyslurm.utils.ctime import ( date_to_timestamp, timestr_to_mins, @@ -474,7 +474,7 @@ cdef class Job: cluster = LOCAL_CLUSTER if not cluster else cluster jfilter = JobFilter(ids=[int(job_id)], clusters=[cluster], with_script=with_script, with_env=with_env) - job = Jobs.load(jfilter).get(int(job_id), cluster=cluster) + job = Jobs.load(jfilter).get((cluster, int(job_id))) if not job: raise RPCError(msg=f"Job {job_id} does not exist on " f"Cluster {cluster}") diff --git a/pyslurm/collections.pxd b/pyslurm/xcollections.pxd similarity index 100% rename from pyslurm/collections.pxd rename to pyslurm/xcollections.pxd diff --git a/pyslurm/collections.pyx b/pyslurm/xcollections.pyx similarity index 100% rename from pyslurm/collections.pyx rename to pyslurm/xcollections.pyx diff --git a/tests/unit/test_collection.py b/tests/unit/test_collection.py index 399088c7..45c182f9 100644 --- a/tests/unit/test_collection.py +++ b/tests/unit/test_collection.py @@ -22,8 +22,8 @@ import pytest import pyslurm -import pyslurm.collections -from pyslurm.collections import sum_property +import pyslurm.xcollections +from pyslurm.xcollections import sum_property LOCAL_CLUSTER = pyslurm.db.cluster.LOCAL_CLUSTER OTHER_CLUSTER = "other_cluster" diff --git a/tests/unit/test_common.py b/tests/unit/test_common.py index c9a52117..cf5353b1 100644 --- a/tests/unit/test_common.py +++ b/tests/unit/test_common.py @@ -57,7 +57,7 @@ instance_to_dict, ) from pyslurm.utils import cstr -from 
pyslurm.collections import ( +from pyslurm.xcollections import ( sum_property, ) From abae18dbe5135e90bbc89eacc5edb25a108f6736 Mon Sep 17 00:00:00 2001 From: tazend Date: Thu, 13 Jul 2023 20:52:23 +0200 Subject: [PATCH 24/28] wip --- pyslurm/core/job/job.pyx | 2 +- pyslurm/core/job/step.pyx | 2 +- pyslurm/core/node.pyx | 2 +- pyslurm/core/partition.pyx | 2 +- pyslurm/db/__init__.py | 1 - pyslurm/db/assoc.pyx | 4 ++-- pyslurm/db/cluster.pxd | 27 ------------------------ pyslurm/db/job.pyx | 4 ++-- pyslurm/{db/cluster.pyx => settings.pyx} | 4 +++- pyslurm/xcollections.pyx | 2 +- tests/unit/test_collection.py | 3 +-- 11 files changed, 13 insertions(+), 40 deletions(-) delete mode 100644 pyslurm/db/cluster.pxd rename pyslurm/{db/cluster.pyx => settings.pyx} (92%) diff --git a/pyslurm/core/job/job.pyx b/pyslurm/core/job/job.pyx index 31da0c7e..8d357b85 100644 --- a/pyslurm/core/job/job.pyx +++ b/pyslurm/core/job/job.pyx @@ -34,7 +34,7 @@ from typing import Union from pyslurm.utils import cstr, ctime from pyslurm.utils.uint import * from pyslurm.core.job.util import * -from pyslurm.db.cluster import LOCAL_CLUSTER +from pyslurm.settings import LOCAL_CLUSTER from pyslurm import xcollections from pyslurm.core.error import ( RPCError, diff --git a/pyslurm/core/job/step.pyx b/pyslurm/core/job/step.pyx index 26927d9c..da212a4a 100644 --- a/pyslurm/core/job/step.pyx +++ b/pyslurm/core/job/step.pyx @@ -26,7 +26,7 @@ from typing import Union from pyslurm.utils import cstr, ctime from pyslurm.utils.uint import * from pyslurm.core.error import RPCError, verify_rpc -from pyslurm.db.cluster import LOCAL_CLUSTER +from pyslurm.settings import LOCAL_CLUSTER from pyslurm import xcollections from pyslurm.utils.helpers import ( signal_to_num, diff --git a/pyslurm/core/node.pyx b/pyslurm/core/node.pyx index 11638ff3..cb22fcfc 100644 --- a/pyslurm/core/node.pyx +++ b/pyslurm/core/node.pyx @@ -28,7 +28,7 @@ from pyslurm.utils import ctime from pyslurm.utils.uint import * from 
pyslurm.core.error import RPCError, verify_rpc from pyslurm.utils.ctime import timestamp_to_date, _raw_time -from pyslurm.db.cluster import LOCAL_CLUSTER +from pyslurm.settings import LOCAL_CLUSTER from pyslurm import xcollections from pyslurm.utils.helpers import ( uid_to_name, diff --git a/pyslurm/core/partition.pyx b/pyslurm/core/partition.pyx index f579afa3..e1a1b6b1 100644 --- a/pyslurm/core/partition.pyx +++ b/pyslurm/core/partition.pyx @@ -30,7 +30,7 @@ from pyslurm.utils.uint import * from pyslurm.core.error import RPCError, verify_rpc from pyslurm.utils.ctime import timestamp_to_date, _raw_time from pyslurm.constants import UNLIMITED -from pyslurm.db.cluster import LOCAL_CLUSTER +from pyslurm.settings import LOCAL_CLUSTER from pyslurm import xcollections from pyslurm.utils.helpers import ( uid_to_name, diff --git a/pyslurm/db/__init__.py b/pyslurm/db/__init__.py index 0e78a734..acd36a40 100644 --- a/pyslurm/db/__init__.py +++ b/pyslurm/db/__init__.py @@ -42,4 +42,3 @@ Association, AssociationFilter, ) -from . 
import cluster diff --git a/pyslurm/db/assoc.pyx b/pyslurm/db/assoc.pyx index 87b00385..4e535a46 100644 --- a/pyslurm/db/assoc.pyx +++ b/pyslurm/db/assoc.pyx @@ -29,8 +29,8 @@ from pyslurm.utils.helpers import ( ) from pyslurm.utils.uint import * from pyslurm.db.connection import _open_conn_or_error -from pyslurm.db.cluster import LOCAL_CLUSTER -import pyslurm.xcollections as xcollections +from pyslurm.settings import LOCAL_CLUSTER +from pyslurm import xcollections cdef class Associations(MultiClusterMap): diff --git a/pyslurm/db/cluster.pxd b/pyslurm/db/cluster.pxd deleted file mode 100644 index 30acdbde..00000000 --- a/pyslurm/db/cluster.pxd +++ /dev/null @@ -1,27 +0,0 @@ -######################################################################### -# cluster.pxd - pyslurm slurmdbd cluster api -######################################################################### -# Copyright (C) 2023 Toni Harzendorf -# -# This file is part of PySlurm -# -# PySlurm is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. - -# PySlurm is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with PySlurm; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-# -# cython: c_string_type=unicode, c_string_encoding=default -# cython: language_level=3 - - -from pyslurm cimport slurm -from pyslurm.utils cimport cstr diff --git a/pyslurm/db/job.pyx b/pyslurm/db/job.pyx index 213111b7..befd9515 100644 --- a/pyslurm/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -27,8 +27,8 @@ from pyslurm.core.error import RPCError, PyslurmError from pyslurm.core import slurmctld from typing import Any from pyslurm.utils.uint import * -from pyslurm.db.cluster import LOCAL_CLUSTER -import pyslurm.xcollections as xcollections +from pyslurm.settings import LOCAL_CLUSTER +from pyslurm import xcollections from pyslurm.utils.ctime import ( date_to_timestamp, timestr_to_mins, diff --git a/pyslurm/db/cluster.pyx b/pyslurm/settings.pyx similarity index 92% rename from pyslurm/db/cluster.pyx rename to pyslurm/settings.pyx index 436183a8..5085a9f5 100644 --- a/pyslurm/db/cluster.pyx +++ b/pyslurm/settings.pyx @@ -1,5 +1,5 @@ ######################################################################### -# cluster.pyx - pyslurm slurmdbd cluster api +# settings.pyx - pyslurm global settings ######################################################################### # Copyright (C) 2023 Toni Harzendorf # @@ -23,6 +23,8 @@ # cython: language_level=3 from pyslurm.core import slurmctld +from pyslurm cimport slurm +from pyslurm.utils cimport cstr LOCAL_CLUSTER = cstr.to_unicode(slurm.slurm_conf.cluster_name) diff --git a/pyslurm/xcollections.pyx b/pyslurm/xcollections.pyx index 47ee91c3..ac5dcbc7 100644 --- a/pyslurm/xcollections.pyx +++ b/pyslurm/xcollections.pyx @@ -23,7 +23,7 @@ # cython: language_level=3 """Custom Collection utilities""" -from pyslurm.db.cluster import LOCAL_CLUSTER +from pyslurm.settings import LOCAL_CLUSTER import json from typing import Union, Any diff --git a/tests/unit/test_collection.py b/tests/unit/test_collection.py index 45c182f9..64c14c89 100644 --- a/tests/unit/test_collection.py +++ b/tests/unit/test_collection.py @@ -22,10 +22,9 @@ import 
pytest import pyslurm -import pyslurm.xcollections from pyslurm.xcollections import sum_property -LOCAL_CLUSTER = pyslurm.db.cluster.LOCAL_CLUSTER +LOCAL_CLUSTER = pyslurm.settings.LOCAL_CLUSTER OTHER_CLUSTER = "other_cluster" From d71afb2f7792feb28ce00812c9dc27c61c9b2483 Mon Sep 17 00:00:00 2001 From: tazend Date: Thu, 13 Jul 2023 21:01:19 +0200 Subject: [PATCH 25/28] wip --- pyslurm/xcollections.pyx | 7 ++++--- tests/integration/test_node.py | 7 ++++--- tests/integration/test_partition.py | 7 ++++--- tests/unit/test_collection.py | 3 ++- 4 files changed, 14 insertions(+), 10 deletions(-) diff --git a/pyslurm/xcollections.pyx b/pyslurm/xcollections.pyx index ac5dcbc7..0037ef46 100644 --- a/pyslurm/xcollections.pyx +++ b/pyslurm/xcollections.pyx @@ -462,14 +462,15 @@ cdef class MultiClusterMap: return ClustersView(self) def popitem(self): - """Remove and return some item in the collection""" + """Remove and return a (key, value) pair as a 2-tuple""" try: item = next(iter(self.values())) except StopIteration: raise KeyError from None - del self.data[item.cluster][self._item_id(item)] - return item + key = self._item_id(item) + del self.data[item.cluster][key] + return (key, item) def clear(self): """Clear the collection""" diff --git a/tests/integration/test_node.py b/tests/integration/test_node.py index b79c38d4..a1c9f6b6 100644 --- a/tests/integration/test_node.py +++ b/tests/integration/test_node.py @@ -29,7 +29,7 @@ def test_load(): - name = Nodes.load().popitem().name + name, _ = Nodes.load().popitem() # Now load the node info node = Node.load(name) @@ -56,7 +56,7 @@ def test_create(): def test_modify(): - node = Nodes.load().popitem() + _, node = Nodes.load().popitem() node.modify(Node(weight=10000)) assert Node.load(node.name).weight == 10000 @@ -69,4 +69,5 @@ def test_modify(): def test_parse_all(): - Nodes.load().popitem().to_dict() + _, node = Nodes.load().popitem() + assert node.to_dict() diff --git a/tests/integration/test_partition.py 
b/tests/integration/test_partition.py index a748c58b..712eeaff 100644 --- a/tests/integration/test_partition.py +++ b/tests/integration/test_partition.py @@ -28,7 +28,7 @@ def test_load(): - part = Partitions.load().popitem() + name, part = Partitions.load().popitem() assert part.name assert part.state @@ -49,7 +49,7 @@ def test_create_delete(): def test_modify(): - part = Partitions.load().popitem() + _, part = Partitions.load().popitem() part.modify(Partition(default_time=120)) assert Partition.load(part.name).default_time == 120 @@ -68,7 +68,8 @@ def test_modify(): def test_parse_all(): - Partitions.load().popitem().to_dict() + _, part = Partitions.load().popitem() + assert part.to_dict() def test_reload(): diff --git a/tests/unit/test_collection.py b/tests/unit/test_collection.py index 64c14c89..ccb27779 100644 --- a/tests/unit/test_collection.py +++ b/tests/unit/test_collection.py @@ -228,8 +228,9 @@ def test_popitem(self): col = self._create_collection() col_len = len(col) - item = col.popitem() + key, item = col.popitem() assert item + assert key assert isinstance(item, pyslurm.db.Job) assert len(col) == col_len-1 From cfd23e734d48889f3f385e95d14615af3f5bbb5b Mon Sep 17 00:00:00 2001 From: tazend Date: Thu, 13 Jul 2023 22:12:41 +0200 Subject: [PATCH 26/28] wip --- CHANGELOG.md | 1 + docs/stylesheets/extra.css | 1 + pyslurm/core/job/job.pxd | 9 +++------ pyslurm/core/job/job.pyx | 23 ++++++++++++----------- pyslurm/core/job/step.pxd | 5 +---- pyslurm/core/job/step.pyx | 3 +-- pyslurm/core/job/submission.pyx | 1 - pyslurm/core/node.pxd | 8 +------- pyslurm/core/node.pyx | 8 ++------ pyslurm/db/job.pyx | 6 +++--- pyslurm/db/step.pxd | 2 +- pyslurm/xcollections.pyx | 2 +- 12 files changed, 27 insertions(+), 42 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index df972286..d98dd77b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - All Collections (like 
[pyslurm.Jobs](https://pyslurm.github.io/23.2/reference/job/#pyslurm.Jobs)) inherit from `list` now instead of `dict` - `JobSearchFilter` has been renamed to `JobFilter` +- Renamed `as_dict` Function of some classes to `to_dict` ## [23.2.1](https://github.com/PySlurm/pyslurm/releases/tag/v23.2.1) - 2023-05-18 diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css index 565642ed..eab891415 100644 --- a/docs/stylesheets/extra.css +++ b/docs/stylesheets/extra.css @@ -6,4 +6,5 @@ /* Indentation. */ div.doc-contents:not(.first) { padding-left: 25px; + border-left: .05rem solid var(--md-typeset-table-color); } diff --git a/pyslurm/core/job/job.pxd b/pyslurm/core/job/job.pxd index 81442413..4eb89bde 100644 --- a/pyslurm/core/job/job.pxd +++ b/pyslurm/core/job/job.pxd @@ -88,7 +88,7 @@ cdef class Jobs(MultiClusterMap): This is the result of multiplying the run_time with the amount of cpus for each job. frozen (bool): - If this is set to True and the reload() method is called, then + If this is set to True and the `reload()` method is called, then *ONLY* Jobs that already exist in this collection will be reloaded. New Jobs that are discovered will not be added to this collection, but old Jobs which have already been purged from the @@ -113,15 +113,12 @@ cdef class Job: job_id (int): An Integer representing a Job-ID. - Raises: - MemoryError: If malloc fails to allocate memory. - Attributes: steps (JobSteps): Steps this Job has. Before you can access the Steps data for a Job, you have to call - the reload() method of a Job instance or the load_steps() method - of a Jobs collection. + the `reload()` method of a Job instance or the `load_steps()` + method of a Jobs collection. 
name (str): Name of the Job id (int): diff --git a/pyslurm/core/job/job.pyx b/pyslurm/core/job/job.pyx index 8d357b85..e2915608 100644 --- a/pyslurm/core/job/job.pyx +++ b/pyslurm/core/job/job.pyx @@ -296,14 +296,14 @@ cdef class Job: Args: signal (Union[str, int]): Any valid signal which will be sent to the Job. Can be either - a str like 'SIGUSR1', or simply an int. + a str like `SIGUSR1`, or simply an [int][]. steps (str): Selects which steps should be signaled. Valid values for this - are: "all", "batch" and "children". The default value is - "children", where all steps except the batch-step will be + are: `all`, `batch` and `children`. The default value is + `children`, where all steps except the batch-step will be signaled. - The value "batch" in contrast means, that only the batch-step - will be signaled. With "all" every step is signaled. + The value `batch` in contrast means, that only the batch-step + will be signaled. With `all` every step is signaled. hurry (bool): If True, no burst buffer data will be staged out. The default value is False. @@ -421,9 +421,9 @@ cdef class Job: Args: mode (str): Determines in which mode the Job should be held. Possible - values are "user" or "admin". By default, the Job is held in - "admin" mode, meaning only an Administrator will be able to - release the Job again. If you specify the mode as "user", the + values are `user` or `admin`. By default, the Job is held in + `admin` mode, meaning only an Administrator will be able to + release the Job again. If you specify the mode as `user`, the User will also be able to release the job. Raises: @@ -465,7 +465,7 @@ cdef class Job: Args: hold (bool, optional): Controls whether the Job should be put in a held state or not. - Default for this is 'False', so it will not be held. + Default for this is `False`, so it will not be held. Raises: RPCError: When requeing the Job was not successful. 
@@ -1183,8 +1183,9 @@ cdef class Job: Return type may still be subject to change in the future Returns: - (dict): Resource layout, where the key is the name of the name and - its value another dict with the CPU-ids, memory and gres. + (dict): Resource layout, where the key is the name of the node and + the value another dict with the keys `cpu_ids`, `memory` and + `gres`. """ # The code for this function is a modified reimplementation from here: # https://github.com/SchedMD/slurm/blob/d525b6872a106d32916b33a8738f12510ec7cf04/src/api/job_info.c#L739 diff --git a/pyslurm/core/job/step.pxd b/pyslurm/core/job/step.pxd index ae2d9c48..489e9d64 100644 --- a/pyslurm/core/job/step.pxd +++ b/pyslurm/core/job/step.pxd @@ -50,7 +50,7 @@ from pyslurm.core.job.task_dist cimport TaskDistribution cdef class JobSteps(dict): - """A collection of [pyslurm.JobStep][] objects for a given Job. + """A [dict][] of [pyslurm.JobStep][] objects for a given Job. Raises: RPCError: When getting the Job steps from the slurmctld failed. @@ -79,9 +79,6 @@ cdef class JobStep: time_limit (int): Time limit in Minutes for this step. - Raises: - MemoryError: If malloc fails to allocate memory. - Attributes: id (Union[str, int]): The id for this step. diff --git a/pyslurm/core/job/step.pyx b/pyslurm/core/job/step.pyx index da212a4a..54cb8f59 100644 --- a/pyslurm/core/job/step.pyx +++ b/pyslurm/core/job/step.pyx @@ -207,7 +207,6 @@ cdef class JobStep: Raises: RPCError: When retrieving Step information from the slurmctld was not successful. - MemoryError: If malloc failed to allocate memory. Examples: >>> import pyslurm @@ -250,7 +249,7 @@ cdef class JobStep: Args: signal (Union[str, int]): Any valid signal which will be sent to the Job. Can be either - a str like 'SIGUSR1', or simply an int. + a str like `SIGUSR1`, or simply an [int][]. Raises: RPCError: When sending the signal was not successful. 
diff --git a/pyslurm/core/job/submission.pyx b/pyslurm/core/job/submission.pyx index bf47105b..df33992b 100644 --- a/pyslurm/core/job/submission.pyx +++ b/pyslurm/core/job/submission.pyx @@ -81,7 +81,6 @@ cdef class JobSubmitDescription: Raises: RPCError: When the job submission was not successful. - MemoryError: If malloc failed to allocate enough memory. Examples: >>> import pyslurm diff --git a/pyslurm/core/node.pxd b/pyslurm/core/node.pxd index 60d9928f..5167de78 100644 --- a/pyslurm/core/node.pxd +++ b/pyslurm/core/node.pxd @@ -84,9 +84,6 @@ cdef class Nodes(MultiClusterMap): Total amount of Watts consumed in this node collection. avg_watts (int): Amount of average watts consumed in this node collection. - - Raises: - MemoryError: If malloc fails to allocate memory. """ cdef: node_info_msg_t *info @@ -166,7 +163,7 @@ cdef class Node: memory_reserved_for_system (int): Raw Memory in Mebibytes reserved for the System not usable by Jobs. - temporary_disk_space_per_node (int): + temporary_disk (int): Amount of temporary disk space this node has, in Mebibytes. weight (int): Weight of the node in scheduling. @@ -224,9 +221,6 @@ cdef class Node: CPU Load on the Node. slurmd_port (int): Port the slurmd is listening on the node. - - Raises: - MemoryError: If malloc fails to allocate memory. """ cdef: node_info_t *info diff --git a/pyslurm/core/node.pyx b/pyslurm/core/node.pyx index cb22fcfc..eac1bfef 100644 --- a/pyslurm/core/node.pyx +++ b/pyslurm/core/node.pyx @@ -78,7 +78,6 @@ cdef class Nodes(MultiClusterMap): Raises: RPCError: When getting all the Nodes from the slurmctld failed. - MemoryError: If malloc fails to allocate memory. """ cdef: dict passwd = {} @@ -278,7 +277,6 @@ cdef class Node: Raises: RPCError: If requesting the Node information from the slurmctld was not successful. - MemoryError: If malloc failed to allocate memory. 
Examples: >>> import pyslurm @@ -318,7 +316,7 @@ cdef class Node: Args: state (str, optional): An optional state the created Node should have. Allowed values - are "future" and "cloud". "future" is the default. + are `future` and `cloud`. `future` is the default. Returns: (pyslurm.Node): This function returns the current Node-instance @@ -326,7 +324,6 @@ cdef class Node: Raises: RPCError: If creating the Node was not successful. - MemoryError: If malloc failed to allocate memory. Examples: >>> import pyslurm @@ -377,7 +374,6 @@ cdef class Node: Raises: RPCError: If deleting the Node was not successful. - MemoryError: If malloc failed to allocate memory. Examples: >>> import pyslurm @@ -515,7 +511,7 @@ cdef class Node: return u64_parse(self.info.mem_spec_limit) @property - def temporary_disk_space(self): + def temporary_disk(self): return u32_parse(self.info.tmp_disk) @property diff --git a/pyslurm/db/job.pyx b/pyslurm/db/job.pyx index befd9515..905f206a 100644 --- a/pyslurm/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -325,7 +325,7 @@ cdef class Jobs(MultiClusterMap): >>> changes = pyslurm.db.Job(comment="A comment for the job") >>> modified_jobs = pyslurm.db.Jobs.modify(db_filter, changes) >>> print(modified_jobs) - >>> [9999] + [9999] In the above example, the changes will be automatically committed if successful. @@ -342,7 +342,7 @@ cdef class Jobs(MultiClusterMap): >>> >>> # Now you can first examine which Jobs have been modified >>> print(modified_jobs) - >>> [9999] + [9999] >>> # And then you can actually commit (or even rollback) the >>> # changes >>> db_conn.commit() @@ -463,7 +463,7 @@ cdef class Job: >>> import pyslurm >>> db_job = pyslurm.db.Job.load(10000) - In the above example, attribute like "script" and "environment" + In the above example, attributes like `script` and `environment` are not populated. 
You must explicitly request one of them to be loaded: diff --git a/pyslurm/db/step.pxd b/pyslurm/db/step.pxd index aef7120b..ab0ff70c 100644 --- a/pyslurm/db/step.pxd +++ b/pyslurm/db/step.pxd @@ -44,7 +44,7 @@ from pyslurm.db.tres cimport TrackableResources, TrackableResource cdef class JobSteps(dict): - """A collection of [pyslurm.db.JobStep][] objects""" + """A [dict][] of [pyslurm.db.JobStep][] objects""" pass diff --git a/pyslurm/xcollections.pyx b/pyslurm/xcollections.pyx index 0037ef46..917368aa 100644 --- a/pyslurm/xcollections.pyx +++ b/pyslurm/xcollections.pyx @@ -130,7 +130,7 @@ class KeysView(BaseView): class ItemsView(BaseView): """A simple Items View of a collection. - Returns a 2-tuple in the form of (key, value) when iterating. + Returns a 2-tuple in the form of `(key, value)` when iterating. Similarly, when checking whether this View contains an Item with the `in` operator, a 2-tuple must be used. From 18337cd6ec65e0aa8013c65575c486867b33272f Mon Sep 17 00:00:00 2001 From: tazend Date: Thu, 13 Jul 2023 22:28:53 +0200 Subject: [PATCH 27/28] wip --- CHANGELOG.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d98dd77b..4f6dd4c6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,9 +16,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - New attributes for a Database Job: - extra - failed_node -- Now possible to initialize a pyslurm.db.Jobs collection with existing job +- Now possible to initialize a [pyslurm.db.Jobs][] collection with existing job ids or pyslurm.db.Job objects - Added `as_dict` function to all Collections +- Added a new Base Class [MultiClusterMap][pyslurm.xcollections.MultiClusterMap] that some Collections inherit from. 
### Fixed @@ -28,8 +29,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - the Job was older than a day ### Changed - -- All Collections (like [pyslurm.Jobs](https://pyslurm.github.io/23.2/reference/job/#pyslurm.Jobs)) inherit from `list` now instead of `dict` + - `JobSearchFilter` has been renamed to `JobFilter` - Renamed `as_dict` Function of some classes to `to_dict` From d4a7020c94a3241921b5e9b1a747a2967c8cfe1f Mon Sep 17 00:00:00 2001 From: tazend Date: Thu, 13 Jul 2023 22:46:20 +0200 Subject: [PATCH 28/28] wip --- pyslurm/xcollections.pyx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pyslurm/xcollections.pyx b/pyslurm/xcollections.pyx index 917368aa..8be67d29 100644 --- a/pyslurm/xcollections.pyx +++ b/pyslurm/xcollections.pyx @@ -86,7 +86,7 @@ class MCKeysView(BaseView): """A Multi-Cluster Keys View Unlike KeysView, when iterating over an MCKeysView instance, this will - yield a 2-tuple in the form (cluster, key). + yield a 2-tuple in the form `(cluster, key)`. Similarly, when checking whether this View contains a Key with the `in` operator, a 2-tuple must be used in the form described above. @@ -163,7 +163,7 @@ class MCItemsView(BaseView): """A Multi-Cluster Items View. This differs from ItemsView in that it returns a 3-tuple in the form of - (cluster, key, value) when iterating. + `(cluster, key, value)` when iterating. Similarly, when checking whether this View contains an Item with the `in` operator, a 3-tuple must be used. @@ -462,7 +462,7 @@ cdef class MultiClusterMap: return ClustersView(self) def popitem(self): - """Remove and return a (key, value) pair as a 2-tuple""" + """Remove and return a `(key, value)` pair as a 2-tuple""" try: item = next(iter(self.values())) except StopIteration: