From 569706b9f389849fa18bce3ba14b80853fc0393a Mon Sep 17 00:00:00 2001 From: Robin De Schepper Date: Thu, 1 Dec 2022 15:20:27 +0100 Subject: [PATCH 01/17] moved submodules to private modules --- pynest/nest/__init__.py | 18 +- pynest/nest/lib/_hl_api_connection_helpers.py | 307 ++++ pynest/nest/lib/_hl_api_connections.py | 359 ++++ pynest/nest/lib/_hl_api_exceptions.py | 220 +++ pynest/nest/lib/_hl_api_helper.py | 582 ++++++ pynest/nest/lib/_hl_api_info.py | 211 +++ pynest/nest/lib/_hl_api_models.py | 198 ++ pynest/nest/lib/_hl_api_nodes.py | 203 +++ pynest/nest/lib/_hl_api_parallel_computing.py | 127 ++ pynest/nest/lib/_hl_api_simulation.py | 342 ++++ pynest/nest/lib/_hl_api_spatial.py | 1607 +++++++++++++++++ pynest/nest/lib/_hl_api_types.py | 1221 +++++++++++++ pynest/nest/ll_api.py | 23 +- pynest/nest/logic/__init__.py | 2 +- pynest/nest/logic/_hl_api_logic.py | 54 + pynest/nest/math/__init__.py | 2 +- pynest/nest/math/_hl_api_math.py | 146 ++ pynest/nest/random/__init__.py | 2 +- pynest/nest/random/_hl_api_random.py | 130 ++ pynest/nest/server/__init__.py | 2 +- pynest/nest/server/_hl_api_server.py | 498 +++++ pynest/nest/spatial/__init__.py | 4 +- pynest/nest/spatial/_hl_api_spatial.py | 264 +++ pynest/nest/spatial_distributions/__init__.py | 2 +- .../_hl_api_spatial_distributions.py | 148 ++ pynest/nestkernel_api.pyx | 2 +- 26 files changed, 6652 insertions(+), 22 deletions(-) create mode 100644 pynest/nest/lib/_hl_api_connection_helpers.py create mode 100644 pynest/nest/lib/_hl_api_connections.py create mode 100644 pynest/nest/lib/_hl_api_exceptions.py create mode 100644 pynest/nest/lib/_hl_api_helper.py create mode 100644 pynest/nest/lib/_hl_api_info.py create mode 100644 pynest/nest/lib/_hl_api_models.py create mode 100644 pynest/nest/lib/_hl_api_nodes.py create mode 100644 pynest/nest/lib/_hl_api_parallel_computing.py create mode 100644 pynest/nest/lib/_hl_api_simulation.py create mode 100644 pynest/nest/lib/_hl_api_spatial.py create mode 100644 pynest/nest/lib/_hl_api_types.py create mode 100644 pynest/nest/logic/_hl_api_logic.py create mode 100644 pynest/nest/math/_hl_api_math.py create mode 100644 pynest/nest/random/_hl_api_random.py create mode 100644 pynest/nest/server/_hl_api_server.py create mode 100644 pynest/nest/spatial/_hl_api_spatial.py create mode 100644 pynest/nest/spatial_distributions/_hl_api_spatial_distributions.py diff --git a/pynest/nest/__init__.py b/pynest/nest/__init__.py index 9bb1b9a59f..c0b91b3bbb 100644 --- a/pynest/nest/__init__.py +++ b/pynest/nest/__init__.py @@ -90,16 +90,16 @@ def __init__(self, name): self.__dict__.update(_original_module_attrs) # noqa # Import public APIs of submodules into the `nest.` namespace - _rel_import_star(self, ".lib.hl_api_connections") # noqa: F821 - _rel_import_star(self, ".lib.hl_api_exceptions") # noqa: F821 - _rel_import_star(self, ".lib.hl_api_info") # noqa: F821 - _rel_import_star(self, ".lib.hl_api_models") # noqa: F821 - _rel_import_star(self, ".lib.hl_api_nodes") # noqa: F821 - _rel_import_star(self, ".lib.hl_api_parallel_computing") # noqa: F821 - _rel_import_star(self, ".lib.hl_api_simulation") # noqa: F821 + _rel_import_star(self, ".lib._hl_api_connections") + _rel_import_star(self, ".lib._hl_api_exceptions") + _rel_import_star(self, ".lib._hl_api_info") + _rel_import_star(self, ".lib._hl_api_models") + _rel_import_star(self, ".lib._hl_api_nodes") + _rel_import_star(self, ".lib._hl_api_parallel_computing") + _rel_import_star(self, ".lib._hl_api_simulation") + _rel_import_star(self, ".lib._hl_api_spatial") 
+ _rel_import_star(self, ".lib._hl_api_types") _rel_import_star(self, ".lib.hl_api_sonata") # noqa: F821 - _rel_import_star(self, ".lib.hl_api_spatial") # noqa: F821 - _rel_import_star(self, ".lib.hl_api_types") # noqa: F821 # Lazy loaded modules. They are descriptors, so add them to the type object type(self).raster_plot = _lazy_module_property("raster_plot") # noqa: F821 diff --git a/pynest/nest/lib/_hl_api_connection_helpers.py b/pynest/nest/lib/_hl_api_connection_helpers.py new file mode 100644 index 0000000000..a19058f934 --- /dev/null +++ b/pynest/nest/lib/_hl_api_connection_helpers.py @@ -0,0 +1,307 @@ +# -*- coding: utf-8 -*- +# +# _hl_api_connection_helpers.py +# +# This file is part of NEST. +# +# Copyright (C) 2004 The NEST Initiative +# +# NEST is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 2 of the License, or +# (at your option) any later version. +# +# NEST is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with NEST. If not, see . + +""" +These are helper functions to ease the definition of the +Connect function. +""" + +import copy +import numpy as np + +from ..ll_api import * +from .. import pynestkernel as kernel +from .. import nestkernel_api as nestkernel +from ._hl_api_types import CollocatedSynapses, Mask, NodeCollection, Parameter +from ._hl_api_exceptions import NESTErrors + +__all__ = [ + '_connect_layers_needed', + '_connect_spatial', + '_process_conn_spec', + '_process_spatial_projections', + '_process_syn_spec', +] + + +def _process_conn_spec(conn_spec): + """Processes the connectivity specifications from None, string or dictionary to a dictionary.""" + if conn_spec is None: + # Use default conn_spec + return {'rule': 'all_to_all'} + elif isinstance(conn_spec, str): + processed_conn_spec = {'rule': conn_spec} + return processed_conn_spec + elif isinstance(conn_spec, dict): + return conn_spec + else: + raise TypeError("conn_spec must be a string or dict") + + +def _process_syn_spec(syn_spec, conn_spec, prelength, postlength, use_connect_arrays): + """Processes the synapse specifications from None, string or dictionary to a dictionary.""" + syn_spec = copy.copy(syn_spec) + + if syn_spec is None: + # for use_connect_arrays, return "static_synapse" by default + if use_connect_arrays: + return {"synapse_model": "static_synapse"} + return syn_spec + + if isinstance(syn_spec, CollocatedSynapses): + return syn_spec + + if isinstance(syn_spec, str): + return {"synapse_model": syn_spec} + + rule = conn_spec['rule'] + if isinstance(syn_spec, dict): + if "synapse_model" in syn_spec and not isinstance(syn_spec["synapse_model"], str): + raise kernel.NESTError("'synapse_model' must be a string") + for key, value in syn_spec.items(): + # if value is a list, it is converted to a numpy array + if isinstance(value, (list, tuple)): + value = np.asarray(value) + + if isinstance(value, (np.ndarray, np.generic)): + if len(value.shape) == 1: + if rule == 'one_to_one': + if value.shape[0] != prelength: + if use_connect_arrays: + raise kernel.NESTError( + "'{}' has to be an array of dimension {}.".format(key, prelength)) + else: + raise kernel.NESTError( + "'{}' has to be an array of dimension {}, a scalar 
or a dictionary.".format(
+                                        key, prelength))
+                        else:
+                            syn_spec[key] = value
+                    elif rule == 'fixed_total_number':
+                        if ('N' in conn_spec and value.shape[0] != conn_spec['N']):
+                            raise kernel.NESTError(
+                                "'{}' has to be an array of dimension {}, a scalar or a dictionary".format(
+                                    key, conn_spec['N']))
+                        else:
+                            syn_spec[key] = value
+                    else:
+                        raise kernel.NESTError(
+                            "'{}' has the wrong type. One-dimensional parameter arrays can only be used in "
+                            "conjunction with rule 'one_to_one' or 'fixed_total_number'.".format(key))
+
+                elif len(value.shape) == 2:
+                    if rule == 'all_to_all':
+                        if value.shape[0] != postlength or value.shape[1] != prelength:
+                            raise kernel.NESTError(
+                                "'{}' has to be an array of dimension {}x{} (n_target x n_sources), a scalar "
+                                "or a dictionary.".format(key, postlength, prelength))
+                        else:
+                            syn_spec[key] = value.flatten()
+                    elif rule == 'fixed_indegree':
+                        indegree = conn_spec['indegree']
+                        if value.shape[0] != postlength or \
+                                value.shape[1] != indegree:
+                            raise kernel.NESTError(
+                                "'{}' has to be an array of dimension {}x{} (n_target x indegree), a scalar "
+                                "or a dictionary.".format(key, postlength, indegree))
+                        else:
+                            syn_spec[key] = value.flatten()
+                    elif rule == 'fixed_outdegree':
+                        outdegree = conn_spec['outdegree']
+                        if value.shape[0] != prelength or \
+                                value.shape[1] != outdegree:
+                            raise kernel.NESTError(
+                                "'{}' has to be an array of dimension {}x{} (n_sources x outdegree), a scalar "
+                                "or a dictionary.".format(key, prelength, outdegree))
+                        else:
+                            syn_spec[key] = value.flatten()
+                    else:
+                        raise kernel.NESTError(
+                            "'{}' has the wrong type. Two-dimensional parameter arrays can only be used in "
+                            "conjunction with rules 'all_to_all', 'fixed_indegree' or 'fixed_outdegree'.".format(key))
+
+        # check that "synapse_model" is there for use_connect_arrays
+        if use_connect_arrays and "synapse_model" not in syn_spec:
+            syn_spec["synapse_model"] = "static_synapse"
+
+        return syn_spec
+
+    # If we get here, syn_spec is of illegal type.
+    raise TypeError("syn_spec must be a string, dict or CollocatedSynapses object")
+
+
+def _process_spatial_projections(conn_spec, syn_spec):
+    """
+    Processes the connection and synapse specifications to a single dictionary
+    for the SLI function `ConnectLayers`.
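+
+    For orientation, an illustrative sketch of the mapping performed here
+    (rule and values hypothetical)::
+
+        conn_spec = {'rule': 'pairwise_bernoulli', 'p': 0.2, 'use_on_source': True}
+        syn_spec = {'weight': 2.0}
+        # result: {'kernel': 0.2, 'weight': 2.0,
+        #          'connection_type': 'pairwise_bernoulli_on_source'}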
+    """
+    allowed_conn_spec_keys = ['mask', 'allow_multapses', 'allow_autapses', 'rule',
+                              'indegree', 'outdegree', 'p', 'use_on_source', 'allow_oversized_mask']
+    allowed_syn_spec_keys = ['weight', 'delay', 'synapse_model', 'synapse_label', 'receptor_type']
+    for key in conn_spec.keys():
+        if key not in allowed_conn_spec_keys:
+            raise ValueError("'{}' is not allowed in conn_spec when connecting with mask or kernel".format(key))
+
+    projections = {}
+    projections.update(conn_spec)
+    if 'p' in conn_spec:
+        projections['kernel'] = projections.pop('p')
+    if syn_spec is not None:
+        if isinstance(syn_spec, CollocatedSynapses):
+            for syn_list in syn_spec.syn_specs:
+                for key in syn_list.keys():
+                    if key not in allowed_syn_spec_keys:
+                        raise ValueError(
+                            "'{}' is not allowed in syn_spec when connecting with mask or kernel".format(key))
+            projections.update({'synapse_parameters': syn_spec.syn_specs})
+        else:
+            for key in syn_spec.keys():
+                if key not in allowed_syn_spec_keys:
+                    raise ValueError("'{}' is not allowed in syn_spec when connecting with mask or kernel".format(key))
+            projections.update(syn_spec)
+    if conn_spec['rule'] == 'fixed_indegree':
+        if 'use_on_source' in conn_spec:
+            raise ValueError("'use_on_source' can only be set when using pairwise_bernoulli")
+        projections['connection_type'] = 'pairwise_bernoulli_on_source'
+        projections['number_of_connections'] = projections.pop('indegree')
+    elif conn_spec['rule'] == 'fixed_outdegree':
+        if 'use_on_source' in conn_spec:
+            raise ValueError("'use_on_source' can only be set when using pairwise_bernoulli")
+        projections['connection_type'] = 'pairwise_bernoulli_on_target'
+        projections['number_of_connections'] = projections.pop('outdegree')
+    elif conn_spec['rule'] == 'pairwise_bernoulli':
+        if ('use_on_source' in conn_spec and
+                conn_spec['use_on_source']):
+            projections['connection_type'] = 'pairwise_bernoulli_on_source'
+            projections.pop('use_on_source')
+        else:
+            projections['connection_type'] = 'pairwise_bernoulli_on_target'
+            if 'use_on_source' in projections:
+                projections.pop('use_on_source')
+    else:
+        raise kernel.NESTError("When using kernel or mask, the only possible connection rules are "
+                               "'pairwise_bernoulli', 'fixed_indegree', or 'fixed_outdegree'")
+    projections.pop('rule')
+    return projections
+
+
+def _connect_layers_needed(conn_spec, syn_spec):
+    """Determines if the connection has to be made with the SLI function `ConnectLayers`."""
+    if isinstance(conn_spec, dict):
+        # If a conn_spec entry is based on spatial properties, we must use ConnectLayers.
+        for key, item in conn_spec.items():
+            if isinstance(item, Parameter) and item.is_spatial():
+                return True
+        # We must use ConnectLayers in some additional cases.
+        rule_is_bernoulli = 'pairwise_bernoulli' in str(conn_spec['rule'])
+        if ('mask' in conn_spec or
+                ('p' in conn_spec and not rule_is_bernoulli) or
+                'use_on_source' in conn_spec):
+            return True
+    # If a syn_spec entry is based on spatial properties, we must use ConnectLayers.
+    if isinstance(syn_spec, dict):
+        for key, item in syn_spec.items():
+            if isinstance(item, Parameter) and item.is_spatial():
+                return True
+    elif isinstance(syn_spec, CollocatedSynapses):
+        return any([_connect_layers_needed(conn_spec, syn_param) for syn_param in syn_spec.syn_specs])
+    # If we get here, there is no need to use ConnectLayers.
+ return False + + +def _connect_spatial(pre, post, projections): + """Connect `pre` to `post` using the specifications in `projections`.""" + + def fixdict(d): + for k, v in d.items(): + if isinstance(v, dict): + d[k] = fixdict(v) + elif isinstance(v, Mask) or isinstance(v, Parameter): + d[k] = v._datum + return d + + nestkernel.llapi_connect_layers(pre, post, fixdict(projections)) + + +def _process_input_nodes(pre, post, conn_spec): + """ + Check the properties of `pre` and `post` nodes: + + * If `conn_spec` is 'one_to_one', no uniqueness check is performed; the + "regular" one-to-one connect is used if both inputs are NodeCollection, + "connect_arrays" is used otherwise. + * If both `pre` and `post` are NodeCollections or can be converted to + NodeCollections (i.e. contain unique IDs), then proceed to "regular" + connect (potentially after conversion to NodeCollection). + * If both `pre` and `post` are arrays and contain non-unique items, then + we proceed to "connect_arrays". + * If at least one of them has non-unique items and they have different + sizes, then raise an error. + """ + use_connect_arrays = False + + # check for 'one_to_one' conn_spec + one_to_one_cspec = (conn_spec if not isinstance(conn_spec, dict) + else conn_spec.get('rule', 'all_to_all') == 'one_to_one') + + # check and convert input types + pre_is_nc, post_is_nc = True, True + + if not isinstance(pre, NodeCollection): + # skip uniqueness check for connect_arrays compatible `conn_spec` + if not one_to_one_cspec and len(set(pre)) == len(pre): + pre = NodeCollection(pre) + else: + pre_is_nc = False + + if not isinstance(post, NodeCollection): + # skip uniqueness check for connect_arrays compatible `conn_spec` + if not one_to_one_cspec and len(set(post)) == len(post): + post = NodeCollection(post) + else: + post_is_nc = False + + if not pre_is_nc or not post_is_nc: + if len(pre) != len(post): + raise NESTErrors.ArgumentType( + "Connect", + "If `pre` or `post` contain non-unique IDs, then they must have the same length.") + + # convert to arrays + pre = np.asarray(pre) + post = np.asarray(post) + + # check array type + if not issubclass(pre.dtype.type, (int, np.integer)): + raise NESTErrors.ArgumentType("Connect", " `pre` IDs should be integers.") + + if not issubclass(post.dtype.type, (int, np.integer)): + raise NESTErrors.ArgumentType("Connect", " `post` IDs should be integers.") + + # check dimension + if not (pre.ndim == 1 and post.ndim == 1): + raise ValueError("Sources and targets must be 1-dimensional arrays") + + use_connect_arrays = True + + if use_connect_arrays and not one_to_one_cspec: + raise ValueError("When connecting two arrays with non-unique IDs, `conn_spec` must be 'one_to_one'.") + + return use_connect_arrays, pre, post diff --git a/pynest/nest/lib/_hl_api_connections.py b/pynest/nest/lib/_hl_api_connections.py new file mode 100644 index 0000000000..16b2e02444 --- /dev/null +++ b/pynest/nest/lib/_hl_api_connections.py @@ -0,0 +1,359 @@ +# -*- coding: utf-8 -*- +# +# _hl_api_connections.py +# +# This file is part of NEST. +# +# Copyright (C) 2004 The NEST Initiative +# +# NEST is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 2 of the License, or +# (at your option) any later version. +# +# NEST is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with NEST. If not, see . + +""" +Functions for connection handling +""" + +import numpy + +from ..ll_api import connect_arrays +from .. import pynestkernel as kernel +from .. import nestkernel_api as nestkernel + +from ._hl_api_connection_helpers import (_process_input_nodes, _connect_layers_needed, + _connect_spatial, _process_conn_spec, + _process_spatial_projections, _process_syn_spec) +from ._hl_api_nodes import Create +from ._hl_api_parallel_computing import NumProcesses +from ._hl_api_types import NodeCollection, SynapseCollection, Mask, Parameter + +__all__ = [ + 'Connect', + 'Disconnect', + 'GetConnections', +] + + +def GetConnections(source=None, target=None, synapse_model=None, + synapse_label=None): + """Return a `SynapseCollection` representing the connection identifiers. + + Any combination of `source`, `target`, `synapse_model` and + `synapse_label` parameters is permitted. + + Parameters + ---------- + source : NodeCollection, optional + Source node IDs, only connections from these + pre-synaptic neurons are returned + target : NodeCollection, optional + Target node IDs, only connections to these + postsynaptic neurons are returned + synapse_model : str, optional + Only connections with this synapse type are returned + synapse_label : int, optional + (non-negative) only connections with this synapse label are returned + + Returns + ------- + SynapseCollection: + Object representing the source-node_id, target-node_id, target-thread, synapse-id, port of connections, see + :py:class:`.SynapseCollection` for more. + + Raises + ------ + TypeError + + Notes + ----- + Only connections with targets on the MPI process executing + the command are returned. + """ + + params = {} + + if source is not None: + if isinstance(source, NodeCollection): + params['source'] = source + else: + raise TypeError("source must be NodeCollection.") + + if target is not None: + if isinstance(target, NodeCollection): + params['target'] = target + else: + raise TypeError("target must be NodeCollection.") + + if synapse_model is not None: + params['synapse_model'] = synapse_model + + if synapse_label is not None: + params['synapse_label'] = synapse_label + + conns = nestkernel.llapi_get_connections(params) + + return conns + + +def Connect(pre, post, conn_spec=None, syn_spec=None, + return_synapsecollection=False): + """ + Connect `pre` nodes to `post` nodes. + + Nodes in `pre` and `post` are connected using the specified connectivity + (`all-to-all` by default) and synapse type (:cpp:class:`static_synapse ` by default). + Details depend on the connectivity rule. + + Lists of synapse models and connection rules are available as + ``nest.synapse_models`` and ``nest.connection_rules``, respectively. 
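+
+    For illustration, a minimal call might look as follows (model names and
+    values hypothetical)::
+
+        pre = nest.Create('iaf_psc_alpha', 10)
+        post = nest.Create('iaf_psc_alpha', 10)
+        nest.Connect(pre, post,
+                     conn_spec={'rule': 'fixed_indegree', 'indegree': 2},
+                     syn_spec={'synapse_model': 'static_synapse', 'weight': 2.0})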
+
+    Parameters
+    ----------
+    pre : NodeCollection (or array-like object)
+        Presynaptic nodes, as object representing the IDs of the nodes
+    post : NodeCollection (or array-like object)
+        Postsynaptic nodes, as object representing the IDs of the nodes
+    conn_spec : str or dict, optional
+        Specifies connectivity rule, see below
+    syn_spec : str or dict, optional
+        Specifies synapse model, see below
+    return_synapsecollection: bool
+        Specifies whether or not we should return a :py:class:`.SynapseCollection` of pre and post connections
+
+    Raises
+    ------
+    kernel.NESTError
+
+    Notes
+    -----
+    It is possible to connect NumPy arrays of node IDs one-to-one by passing the arrays as `pre` and `post`,
+    specifying `'one_to_one'` for `conn_spec`.
+    In that case, the arrays may contain non-unique IDs.
+    You may also specify weight, delay, and receptor type for each connection as NumPy arrays in the `syn_spec`
+    dictionary.
+    This feature is currently not available when MPI is used; trying to connect arrays with more than one
+    MPI process will raise an error.
+
+    If pre and post have spatial positions, a `mask` can be specified as a dictionary. The mask defines which
+    nodes are considered as potential targets for each source node. Connections with spatial nodes can also
+    use `nest.spatial_distributions` as parameters, for instance for the probability `p`.
+
+    **Connectivity specification (conn_spec)**
+
+    Available rules and associated parameters::
+
+     - 'all_to_all' (default)
+     - 'one_to_one'
+     - 'fixed_indegree', 'indegree'
+     - 'fixed_outdegree', 'outdegree'
+     - 'fixed_total_number', 'N'
+     - 'pairwise_bernoulli', 'p'
+     - 'symmetric_pairwise_bernoulli', 'p'
+
+    See :ref:`conn_rules` for more details, including example usage.
+
+    **Synapse specification (syn_spec)**
+
+    The synapse model and its properties can be given either as a string
+    identifying a specific synapse model (default: :cpp:class:`static_synapse <nest::static_synapse>`) or
+    as a dictionary specifying the synapse model and its parameters.
+
+    Available keys in the synapse specification dictionary are::
+
+     - 'synapse_model'
+     - 'weight'
+     - 'delay'
+     - 'receptor_type'
+     - any parameters specific to the selected synapse model
+
+    See :ref:`synapse_spec` for details, including example usage.
+
+    All parameters are optional and if not specified, the default values
+    of the synapse model will be used. The key 'synapse_model' identifies the
+    synapse model; this can be one of NEST's built-in synapse models
+    or a user-defined model created via :py:func:`.CopyModel`.
+
+    If `synapse_model` is not specified, the default model
+    :cpp:class:`static_synapse <nest::static_synapse>` will be used.
+
+    Distributed parameters can be defined through NEST's different parameter types. NEST has various
+    random parameters, spatial parameters and distributions (only accessible for nodes with spatial positions),
+    logical expressions and mathematical expressions, which can be used to define node and connection parameters.
+
+    To see all available parameters, see documentation defined in distributions, logic, math,
+    random and spatial modules.
+
+    See Also
+    --------
+    :ref:`connection_management`
+    """
+    use_connect_arrays, pre, post = _process_input_nodes(pre, post, conn_spec)
+
+    # Converting conn_spec to dict, without putting it on the SLI stack.
+    processed_conn_spec = _process_conn_spec(conn_spec)
+    # If syn_spec is given, its contents are checked, and if needed converted
+    # to the right formats.
+    processed_syn_spec = _process_syn_spec(
+        syn_spec, processed_conn_spec, len(pre), len(post), use_connect_arrays)
+
+    # If pre and post are arrays of node IDs, and conn_spec is unspecified,
+    # the node IDs are connected one-to-one.
+    if use_connect_arrays:
+        if return_synapsecollection:
+            raise ValueError("SynapseCollection cannot be returned when connecting two arrays of node IDs")
+
+        if processed_syn_spec is None:
+            raise ValueError("When connecting two arrays of node IDs, the synapse specification dictionary must "
+                             "be specified and contain at least the synapse model.")
+
+        # In case of misspelling
+        if "weights" in processed_syn_spec:
+            raise ValueError("To specify weights, use 'weight' in syn_spec.")
+        if "delays" in processed_syn_spec:
+            raise ValueError("To specify delays, use 'delay' in syn_spec.")
+
+        weights = numpy.array(processed_syn_spec['weight']) if 'weight' in processed_syn_spec else None
+        delays = numpy.array(processed_syn_spec['delay']) if 'delay' in processed_syn_spec else None
+
+        try:
+            synapse_model = processed_syn_spec['synapse_model']
+        except KeyError:
+            raise ValueError("When connecting two arrays of node IDs, the synapse specification dictionary must "
+                             "contain a synapse model.")
+
+        # Split remaining syn_spec entries to key and value arrays
+        reduced_processed_syn_spec = {k: processed_syn_spec[k]
+                                      for k in set(processed_syn_spec.keys()).difference(
+                                          set(('weight', 'delay', 'synapse_model')))}
+
+        if len(reduced_processed_syn_spec) > 0:
+            syn_param_keys = numpy.array(list(reduced_processed_syn_spec.keys()), dtype=numpy.string_)
+            syn_param_values = numpy.zeros([len(reduced_processed_syn_spec), len(pre)])
+
+            for i, value in enumerate(reduced_processed_syn_spec.values()):
+                syn_param_values[i] = value
+        else:
+            syn_param_keys = None
+            syn_param_values = None
+
+        connect_arrays(pre, post, weights, delays, synapse_model, syn_param_keys, syn_param_values)
+        return
+
+    if not isinstance(pre, NodeCollection):
+        raise TypeError("Not implemented, presynaptic nodes must be a NodeCollection")
+    if not isinstance(post, NodeCollection):
+        raise TypeError("Not implemented, postsynaptic nodes must be a NodeCollection")
+
+    # In some cases we must connect with ConnectLayers instead.
+    if _connect_layers_needed(processed_conn_spec, processed_syn_spec):
+        # Check that pre and post are layers
+        if pre.spatial is None:
+            raise TypeError("Presynaptic NodeCollection must have spatial information")
+        if post.spatial is None:
+            raise TypeError("Postsynaptic NodeCollection must have spatial information")
+
+        # Create the projection dictionary
+        spatial_projections = _process_spatial_projections(processed_conn_spec, processed_syn_spec)
+        _connect_spatial(pre._datum, post._datum, spatial_projections)
+    else:
+        nestkernel.llapi_connect(pre._datum, post._datum, processed_conn_spec, processed_syn_spec)
+
+    if return_synapsecollection:
+        return GetConnections(pre, post)
+
+
+def Disconnect(*args, conn_spec=None, syn_spec=None):
+    """Disconnect connections in a SynapseCollection, or `pre` neurons from `post` neurons.
+
+    When specifying `pre` and `post` nodes, they are disconnected using the specified disconnection
+    rule (one-to-one by default) and synapse type (:cpp:class:`static_synapse <nest::static_synapse>` by default).
+    Details depend on the disconnection rule.
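+
+    For illustration, both call forms sketched (node collections hypothetical)::
+
+        conns = nest.GetConnections(pre, post)
+        nest.Disconnect(conns)                              # via SynapseCollection
+        nest.Disconnect(pre, post, conn_spec='one_to_one')  # via NodeCollections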
+
+    Parameters
+    ----------
+    args : SynapseCollection or NodeCollections
+        Either a collection of connections to disconnect, or pre- and postsynaptic nodes given as `NodeCollection`s
+    conn_spec : str or dict
+        Disconnection rule when specifying pre- and postsynaptic nodes, see below
+    syn_spec : str or dict
+        Synapse specifications when specifying pre- and postsynaptic nodes, see below
+
+    Notes
+    -----
+
+    **conn_spec**
+
+    Apply the same rules as for connectivity specs in the :py:func:`.Connect` method.
+
+    Possible choices of the conn_spec are
+    ::
+     - 'one_to_one'
+     - 'all_to_all'
+
+    **syn_spec**
+
+    The synapse model and its properties can be specified either as a string naming
+    a synapse model (the list of all available synapse models can be obtained via
+    ``nest.synapse_models``) or as a dictionary as described below.
+
+    Note that only the synapse type is checked when we disconnect, and that if
+    `syn_spec` is given as a non-empty dictionary, the 'synapse_model' parameter must
+    be present.
+
+    If no synapse model is specified, the default model
+    :cpp:class:`static_synapse <nest::static_synapse>` will be used.
+
+    Available keys in the synapse dictionary are:
+    ::
+
+     - 'synapse_model'
+     - 'weight'
+     - 'delay'
+     - 'receptor_type'
+     - parameters specific to the synapse model chosen
+
+    'synapse_model' determines the synapse type, taken from pre-defined synapse
+    types in NEST or manually specified synapses created via :py:func:`.CopyModel`.
+
+    All other parameters are not currently implemented.
+
+    Notes
+    -----
+    `Disconnect` only disconnects explicitly specified nodes.
+
+    """
+
+    if len(args) == 1:
+        synapsecollection = args[0]
+        if not isinstance(synapsecollection, SynapseCollection):
+            raise TypeError('Arguments must be either a SynapseCollection or two NodeCollections')
+        if conn_spec is not None or syn_spec is not None:
+            raise ValueError('When disconnecting with a SynapseCollection, conn_spec and syn_spec cannot be specified')
+        synapsecollection.disconnect()
+    elif len(args) == 2:
+        # Fill default values
+        conn_spec = 'one_to_one' if conn_spec is None else conn_spec
+        syn_spec = 'static_synapse' if syn_spec is None else syn_spec
+        if isinstance(conn_spec, str):
+            conn_spec = {'rule': conn_spec}
+        if isinstance(syn_spec, str):
+            syn_spec = {'synapse_model': syn_spec}
+        pre, post = args
+        if not isinstance(pre, NodeCollection) or not isinstance(post, NodeCollection):
+            raise TypeError('Arguments must be either a SynapseCollection or two NodeCollections')
+        sps(pre)
+        sps(post)
+        sps(conn_spec)
+        sps(syn_spec)
+        sr('Disconnect_g_g_D_D')
+    else:
+        raise TypeError('Arguments must be either a SynapseCollection or two NodeCollections')
diff --git a/pynest/nest/lib/_hl_api_exceptions.py b/pynest/nest/lib/_hl_api_exceptions.py
new file mode 100644
index 0000000000..cadd5797f9
--- /dev/null
+++ b/pynest/nest/lib/_hl_api_exceptions.py
@@ -0,0 +1,220 @@
+# -*- coding: utf-8 -*-
+#
+# _hl_api_exceptions.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# +# You should have received a copy of the GNU General Public License +# along with NEST. If not, see . + + +class NESTMappedException(type): + """Metaclass for exception namespace that dynamically creates exception classes. + + If a class (self) of this (meta)-type has an unknown attribute requested, __getattr__ defined + below gets called, creating a class with that name (the error name) and with an __init__ taking + commandname and errormessage (as created in the source) which is a closure on the parent and + errorname as well, with a parent of default type (self.default_parent) or + self.parents[errorname] if defined. """ + + def __getattr__(cls, errorname): + """Creates a class of type "errorname" which is a child of cls.default_parent or + cls.parents[errorname] if one is defined. + + This __getattr__ function also stores the class permanently as an attribute of cls for + re-use where cls is actually the class that triggered the getattr (the class that + NESTMappedException is a metaclass of). """ + + # Dynamic class construction, first check if we know its parent + if errorname in cls.parents: + parent = getattr(cls, cls.parents[errorname]) + else: # otherwise, get the default (SLIException) + parent = cls.default_parent + + # and now dynamically construct the new class + # not NESTMappedException, since that would mean the metaclass would let the new class inherit + # this __getattr__, allowing unintended dynamic construction of attributes + newclass = type( + cls.__name__ + '.' + errorname, + (parent,), + { + '__init__': cls.init(parent, errorname), + '__doc__': + """Dynamically created exception {} from {}. + + Created for the namespace: {}. + Parent exception: {}. + """.format(errorname, cls.source, cls.__name__, parent.__name__) + } + ) + + # Cache for reuse: __getattr__ should now not get called if requested again + setattr(cls, errorname, newclass) + + # And now we return the exception + return newclass + + +class NESTErrors(metaclass=NESTMappedException): + """Namespace for NEST exceptions, including dynamically created classes from SLI. + + Dynamic exception creation is through __getattr__ defined in the metaclass NESTMappedException. + """ + + class NESTError(Exception): + """Base exception class for all NEST exceptions. + """ + + def __init__(self, message): + """Initializer for NESTError base class. + + Parameters: + ----------- + message: str + full error message to report. + """ + + Exception.__init__(self, message) + self.message = message + + class SLIException(NESTError): + """Base class for all exceptions coming from sli. + """ + + def __init__(self, commandname, errormessage, errorname='SLIException'): + """Initialize function. + + Parameters: + ----------- + errorname: error name from SLI. + commandname: command name from SLI. + errormessage: message from SLI. + """ + message = "{} in PyNEST function {}: {}".format(errorname, commandname, errormessage) + NESTErrors.NESTError.__init__(self, message) + + self.errorname = errorname + self.commandname = commandname + self.errormessage = errormessage + + class PyNESTError(NESTError): + """Exceptions produced from Python/Cython code. + """ + pass + + @staticmethod + def init(parent, errorname): + """ Static class method to construct init's for SLIException children. + + Construct our new init with closure on errorname (as a default value) and parent. + The default value allows the __init__ to be chained and set by the leaf child. 
+        This also moves the parameterization of __init__ away from the class construction logic
+        and next to the SLIException init.
+
+        Parameters:
+        ----------
+        parent: the ancestor of the class needed to properly walk up the MRO (not possible with super() or
+            super(type,...) because of the dynamic creation of the function
+            (used as a closure on the constructed __init__).
+        errorname: the class name for information purposes
+            internally (used as a closure on the constructed __init__).
+        """
+
+        def __init__(self, commandname, errormessage, errorname=errorname, *args, **kwargs):
+            # recursively init the parent class: all of this is only needed to properly set errorname
+            parent.__init__(self, commandname, errormessage, *args, errorname=errorname, **kwargs)
+
+        docstring = \
+            """Initialization function.
+
+            Parameters:
+            -----------
+            commandname: sli command name.
+            errormessage: sli error message.
+            errorname: set by default ("{}") or passed in by child (shouldn't be explicitly set
+                when creating an instance)
+            *args, **kwargs: passed through to base class.
+
+            self will be a descendant of {}.
+            """.format(errorname, parent.__name__)
+
+        try:
+            __init__.__doc__ = docstring
+        except AttributeError:
+            __init__.__func__.__doc__ = docstring
+
+        return __init__
+
+    # source: the dynamically created exceptions come from SLI
+    # default_parent: the dynamically created exceptions are descended from SLIException
+    # parents: unless they happen to be mapped in this list to another exception descended from SLIException
+    #          these should be updated when new exceptions in sli are created that aren't directly descended
+    #          from SLIException (but nothing bad will happen, it's just that otherwise they'll be directly
+    #          descended from SLIException instead of an intermediate exception; they'll still be constructed
+    #          and usable)
+    source = "SLI"
+    default_parent = SLIException
+    parents = {
+        'TypeMismatch': 'InterpreterError',
+        'SystemSignal': 'InterpreterError',
+        'RangeCheck': 'InterpreterError',
+        'ArgumentType': 'InterpreterError',
+        'BadParameterValue': 'SLIException',
+        'DictError': 'InterpreterError',
+        'UndefinedName': 'DictError',
+        'EntryTypeMismatch': 'DictError',
+        'StackUnderflow': 'InterpreterError',
+        'IOError': 'SLIException',
+        'UnaccessedDictionaryEntry': 'DictError',
+        'UnknownModelName': 'KernelException',
+        'NewModelNameExists': 'KernelException',
+        'ModelInUse': 'KernelException',
+        'UnknownSynapseType': 'KernelException',
+        'UnknownNode': 'KernelException',
+        'NoThreadSiblingsAvailable': 'KernelException',
+        'LocalNodeExpected': 'KernelException',
+        'NodeWithProxiesExpected': 'KernelException',
+        'UnknownReceptorType': 'KernelException',
+        'IncompatibleReceptorType': 'KernelException',
+        'UnknownPort': 'KernelException',
+        'IllegalConnection': 'KernelException',
+        'InexistentConnection': 'KernelException',
+        'UnknownThread': 'KernelException',
+        'BadDelay': 'KernelException',
+        'UnexpectedEvent': 'KernelException',
+        'UnsupportedEvent': 'KernelException',
+        'BadProperty': 'KernelException',
+        'BadParameter': 'KernelException',
+        'DimensionMismatch': 'KernelException',
+        'DistributionError': 'KernelException',
+        'InvalidDefaultResolution': 'KernelException',
+        'InvalidTimeInModel': 'KernelException',
+        'StepMultipleRequired': 'KernelException',
+        'TimeMultipleRequired': 'KernelException',
+        'GSLSolverFailure': 'KernelException',
+        'NumericalInstability': 'KernelException',
+        'KeyError': 'KernelException',
+        'MUSICPortUnconnected': 'KernelException',
+        'MUSICPortHasNoWidth':
'KernelException', + 'MUSICPortAlreadyPublished': 'KernelException', + 'MUSICSimulationHasRun': 'KernelException', + 'MUSICChannelUnknown': 'KernelException', + 'MUSICPortUnknown': 'KernelException', + 'MUSICChannelAlreadyMapped': 'KernelException' + } + + +# So we don't break any code that currently catches a nest.NESTError +NESTError = NESTErrors.NESTError diff --git a/pynest/nest/lib/_hl_api_helper.py b/pynest/nest/lib/_hl_api_helper.py new file mode 100644 index 0000000000..425bbe6b38 --- /dev/null +++ b/pynest/nest/lib/_hl_api_helper.py @@ -0,0 +1,582 @@ +# -*- coding: utf-8 -*- +# +# _hl_api_helper.py +# +# This file is part of NEST. +# +# Copyright (C) 2004 The NEST Initiative +# +# NEST is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 2 of the License, or +# (at your option) any later version. +# +# NEST is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with NEST. If not, see . + +""" +These are helper functions to ease the definition of the high-level +API of the PyNEST wrapper. +""" + +import warnings +import json +import functools +import textwrap +import subprocess +import os +import re +import shlex +import sys +import numpy +import pydoc + +from string import Template + +from .. import pynestkernel as kernel +from .. import nestkernel_api as nestkernel +import nest + +__all__ = [ + 'broadcast', + 'deprecated', + 'get_parameters', + 'get_parameters_hierarchical_addressing', + 'get_wrapped_text', + 'is_coercible_to_sli_array', + 'is_iterable', + 'is_sequence_of_connections', + 'is_sequence_of_node_ids', + 'load_help', + 'model_deprecation_warning', + 'restructure_data', + 'show_deprecation_warning', + 'show_help_with_pager', + 'SuppressedDeprecationWarning', + 'uni_str', +] + +# These flags are used to print deprecation warnings only once. +# Only flags for special cases need to be entered here, such as special models +# or function parameters, all flags for deprecated functions will be registered +# by the @deprecated decorator, and therefore does not manually need to be placed here. +_deprecation_warning = {'deprecated_model': {'deprecation_issued': False, + 'replacement': 'replacement_mod'}, + 'iaf_psc_alpha_canon': {'deprecation_issued': False, + 'replacement': 'iaf_psc_alpha_ps'}, + 'pp_pop_psc_delta': {'deprecation_issued': False, + 'replacement': 'gif_pop_psc_exp'}} + + +def format_Warning(message, category, filename, lineno, line=None): + """Formats deprecation warning.""" + + return '%s:%s: %s:%s\n' % (filename, lineno, category.__name__, message) + + +warnings.formatwarning = format_Warning + + +def get_wrapped_text(text, width=80): + """Formats a given multiline string to wrap at a given width, while + preserving newlines (and removing excessive whitespace). + + Parameters + ---------- + text : str + String to format + + Returns + ------- + str: + Wrapped string + """ + + lines = text.split("\n") + lines = [textwrap.fill(" ".join(line.split()), width=width) for line in lines] + return "\n".join(lines) + + +def show_deprecation_warning(func_name, alt_func_name=None, text=None): + """Shows a deprecation warning for a function. 
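+
+    For illustration, a typical call (function names hypothetical; the warning
+    is only issued if ``func_name`` is registered in ``_deprecation_warning``
+    above, which the ``@deprecated`` decorator below does automatically)::
+
+        show_deprecation_warning('SomeOldFunction', 'SomeNewFunction')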
+ + Parameters + ---------- + func_name : str + Name of the deprecated function + alt_func_name : str, optional + Name of the function to use instead. Needed if text=None + text : str, optional + Text to display instead of standard text + """ + if func_name in _deprecation_warning: + if not _deprecation_warning[func_name]['deprecation_issued']: + if text is None: + text = ("{0} is deprecated and will be removed in a future version of NEST.\n" + "Please use {1} instead!").format(func_name, alt_func_name) + text = get_wrapped_text(text) + + warnings.warn('\n' + text) # add LF so text starts on new line + _deprecation_warning[func_name]['deprecation_issued'] = True + + +# Since we need to pass extra arguments to the decorator, we need a +# decorator factory. See http://stackoverflow.com/questions/15564512 +def deprecated(alt_func_name, text=None): + """Decorator for deprecated functions. + + Shows a warning and calls the original function. + + Parameters + ---------- + alt_func_name : str, optional + Name of the function to use instead, may be empty string + text : str, optional + Text to display instead of standard text + + Returns + ------- + function: + Decorator function + """ + + def deprecated_decorator(func): + _deprecation_warning[func.__name__] = {'deprecation_issued': False} + + @functools.wraps(func) + def new_func(*args, **kwargs): + show_deprecation_warning(func.__name__, alt_func_name, text=text) + return func(*args, **kwargs) + return new_func + + return deprecated_decorator + + +def is_iterable(seq): + """Return True if the given object is an iterable, False otherwise. + + Parameters + ---------- + seq : object + Object to check + + Returns + ------- + bool: + True if object is an iterable + """ + + try: + iter(seq) + except TypeError: + return False + + return True + + +def is_coercible_to_sli_array(seq): + """Checks whether a given object is coercible to a SLI array + + Parameters + ---------- + seq : object + Object to check + + Returns + ------- + bool: + True if object is coercible to a SLI array + """ + + import sys + + if sys.version_info[0] >= 3: + return isinstance(seq, (tuple, list, range)) + else: + return isinstance(seq, (tuple, list, xrange)) + + +def is_sequence_of_connections(seq): + """Checks whether low-level API accepts seq as a sequence of + connections. + + Parameters + ---------- + seq : object + Object to check + + Returns + ------- + bool: + True if object is an iterable of dictionaries or + subscriptables of CONN_LEN + """ + + try: + cnn = next(iter(seq)) + return isinstance(cnn, dict) or len(cnn) == kernel.CONN_LEN + except TypeError: + pass + + return False + + +def is_sequence_of_node_ids(seq): + """Checks whether the argument is a potentially valid sequence of + node IDs (non-negative integers). + + Parameters + ---------- + seq : object + Object to check + + Returns + ------- + bool: + True if object is a potentially valid sequence of node IDs + """ + + return all(isinstance(n, int) and n >= 0 for n in seq) + + +def broadcast(item, length, allowed_types, name="item"): + """Broadcast item to given length. 
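+
+    For illustration (values hypothetical; see the branches below)::
+
+        broadcast(2.0, 3, (float,))    # -> (2.0, 2.0, 2.0)
+        broadcast([4.0], 3, (float,))  # -> [4.0, 4.0, 4.0]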
+ + Parameters + ---------- + item : object + Object to broadcast + length : int + Length to broadcast to + allowed_types : list + List of allowed types + name : str, optional + Name of item + + Returns + ------- + object: + The original item broadcasted to sequence form of length + + Raises + ------ + TypeError + + + """ + + if isinstance(item, allowed_types): + return length * (item, ) + elif len(item) == 1: + return length * item + elif len(item) != length: + raise TypeError( + "'{0}' must be a single value, a list with one element or a list with {1} elements.".format(name, length)) + return item + + +def __show_help_in_modal_window(obj, help_text): + """Open modal window with help text + + Parameters + ---------- + obj : string + The filename of the help file + help_text : string + Full help_text + """ + + help_text = json.dumps(help_text) + style = "" + s = Template(""" + require( + ["base/js/dialog"], + function(dialog) { + dialog.modal({ + title: '$jstitle', + body: $jstext, + buttons: { + 'close': {} + } + }); + } + ); + """) + + from IPython.display import HTML, Javascript, display + display(HTML(style)) + display(Javascript(s.substitute(jstitle=obj, jstext=help_text))) + + +def get_help_fname(obj): + """Get file name for help object + + Raises FileNotFound if no help is available for ``obj``. + + Parameters + ---------- + obj : string + Object to get help filename for + + Returns + ------- + string: + File name of the help text for obj + """ + + docdir = sli_func("statusdict/prgdocdir ::") + help_fname = os.path.join(docdir, 'html', 'models', f'{obj}.rst') + + if os.path.isfile(help_fname): + return help_fname + else: + raise FileNotFoundError(f"Sorry, there is no help for '{obj}'.") + + +def load_help(obj): + """Returns documentation of the given object in RST format + + Parameters + ---------- + obj : string + Object to display help for + + Returns + ------- + string: + The documentation of the object or None if no help is available + """ + + help_fname = get_help_fname(obj) + with open(help_fname, 'r', encoding='utf-8') as help_file: + help_text = help_file.read() + return help_text + + +def show_help_with_pager(obj): + """Display help text for the given object in the Python pager + + If called from within a Jupyter notebook, display help in a modal + window instead of in the pager. + + Parameters + ---------- + obj : object + Object to display + + """ + + def check_nb(): + try: + return get_ipython().__class__.__name__.startswith('ZMQ') + except NameError: + return False + + help_text = load_help(obj) + + if check_nb(): + __show_help_in_modal_window(obj + '.rst', help_text) + return + + pydoc.pager(help_text) + + +def __is_executable(path, candidate): + """Returns true for executable files.""" + + candidate = os.path.join(path, candidate) + return os.access(candidate, os.X_OK) and os.path.isfile(candidate) + + +def model_deprecation_warning(model): + """Checks whether the model is to be removed in a future version of NEST. + If so, a deprecation warning is issued. 
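+
+    For example, with the mapping registered in ``_deprecation_warning`` above,
+    the following would warn once and recommend ``iaf_psc_alpha_ps``::
+
+        model_deprecation_warning('iaf_psc_alpha_canon')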
+ + Parameters + ---------- + model: str + Name of model + """ + + if model in _deprecation_warning: + if not _deprecation_warning[model]['deprecation_issued']: + text = ("The {0} model is deprecated and will be removed in a future version of NEST, " + "use {1} instead.").format(model, _deprecation_warning[model]['replacement']) + show_deprecation_warning(model, text=text) + + +def restructure_data(result, keys): + """ + Restructure list of status dictionaries or list of parameter values to dict with lists or single list or int. + + Parameters + ---------- + result: list + list of status dictionaries or list (of lists) of parameter values. + keys: string or list of strings + name(s) of properties + + Returns + ------- + int, list or dict + """ + + if isinstance(keys, str): + if len(result) != 1: + all_keys = sorted({key for result_dict in result for key in result_dict}) + final_result = [] + + for result_dict in result: + if keys in result_dict.keys(): + final_result.append(result_dict[keys]) + elif keys in all_keys: + final_result.append(None) + final_result = tuple(final_result) + else: + final_result = result[0][keys] + + elif is_iterable(keys): + final_result = ({key: [val[i] for val in result] + for i, key in enumerate(keys)} if len(result) != 1 + else {key: val[i] for val in result + for i, key in enumerate(keys)}) + + elif keys is None: + if len(result) != 1: + all_keys = sorted({key for result_dict in result for key in result_dict}) + final_result = {} + + for key in all_keys: + final_result[key] = [] + for result_dict in result: + if key in result_dict.keys(): + final_result[key].append(result_dict[key]) + else: + final_result[key].append(None) + else: + final_result = {key: result_dict[key] for result_dict in result for key in result[0]} + return final_result + + +def get_parameters(nc, param): + """ + Get parameters from nodes. + + Used by NodeCollections `get()` function. + + Parameters + ---------- + nc: NodeCollection + nodes to get values from + param: string or list of strings + string or list of string naming model properties. + + Returns + ------- + int, list: + param is a string so the value(s) is returned + dict: + param is a list of string so a dictionary is returned + """ + # param is single literal + if isinstance(param, str): + result = nestkernel.llapi_get_nc_status(nc._datum, param) + elif is_iterable(param): + result = {param_name: get_parameters(nc, param_name) for param_name in param} + else: + raise TypeError("Params should be either a string or an iterable") + + return result + + +def get_parameters_hierarchical_addressing(nc, params): + """ + Get parameters from nodes, hierarchical case. + + Used by NodeCollections `get()` function. + + Parameters + ---------- + nc: NodeCollection + nodes to get values from + params: tuple + first value in the tuple should be a string, second can be a string or a list of string. + The first value corresponds to the path into the hierarchical structure + while the second value corresponds to the name(s) of the desired + properties. + + Returns + ------- + int, list: + params[-1] is a string so the value(s) is returned + dict: + params[-1] is a list of string so a dictionary is returned + """ + + # Right now, NEST only allows get(arg0, arg1) for hierarchical + # addressing, where arg0 must be a string and arg1 can be string + # or list of strings. 
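+    # For example (``mm`` being a hypothetical recorder NodeCollection):
+    #     get_parameters_hierarchical_addressing(mm, ('events', 'senders'))
+    # first fetches mm.get('events') and then extracts the 'senders' entry
+    # from the restructured result.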
+    if isinstance(params[0], str):
+        value_list = nc.get(params[0])
+        if type(value_list) != tuple:
+            value_list = (value_list,)
+    else:
+        raise TypeError('First argument must be a string, specifying path into hierarchical dictionary')
+
+    result = restructure_data(value_list, None)
+
+    if isinstance(params[-1], str):
+        result = result[params[-1]]
+    else:
+        result = {key: result[key] for key in params[-1]}
+    return result
+
+
+class SuppressedDeprecationWarning:
+    """
+    Context manager turning off deprecation warnings for given methods.
+
+    Think thoroughly before use. This context should only be used as a way to
+    make sure examples do not display deprecation warnings, that is, used in
+    functions called from examples, and not as a way to make tedious
+    deprecation warnings disappear.
+    """
+
+    def __init__(self, no_dep_funcs):
+        """
+        Parameters
+        ----------
+        no_dep_funcs: Function name (string) or iterable of function names
+            for which to suppress deprecation warnings
+        """
+
+        self._no_dep_funcs = (no_dep_funcs if not isinstance(no_dep_funcs, str) else (no_dep_funcs, ))
+        self._deprecation_status = {}
+        sr('verbosity')  # Use the SLI version as we cannot import from info because of a circular inclusion problem
+        self._verbosity_level = spp()
+
+    def __enter__(self):
+
+        for func_name in self._no_dep_funcs:
+            self._deprecation_status[func_name] = _deprecation_warning[func_name]  # noqa
+            _deprecation_warning[func_name]['deprecation_issued'] = True
+
+            # Suppress only if verbosity level is deprecated or lower
+            if self._verbosity_level <= sli_func('M_DEPRECATED'):
+                # Use the SLI version as we cannot import from info because of a circular inclusion problem
+                sr("{} setverbosity".format(sli_func('M_WARNING')))
+
+    def __exit__(self, *args):
+
+        # Reset the verbosity level and deprecation warning status
+        sr("{} setverbosity".format((self._verbosity_level)))
+
+        for func_name, deprec_dict in self._deprecation_status.items():
+            _deprecation_warning[func_name]['deprecation_issued'] = (
+                deprec_dict['deprecation_issued'])
diff --git a/pynest/nest/lib/_hl_api_info.py b/pynest/nest/lib/_hl_api_info.py
new file mode 100644
index 0000000000..3b9a0c8d1d
--- /dev/null
+++ b/pynest/nest/lib/_hl_api_info.py
@@ -0,0 +1,211 @@
+# -*- coding: utf-8 -*-
+#
+# _hl_api_info.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Functions to get information on NEST.
+"""
+
+import sys
+import os
+import textwrap
+import webbrowser
+
+from ._hl_api_helper import broadcast, is_iterable, load_help, show_help_with_pager
+from ._hl_api_types import to_json
+from .. import nestkernel_api as nestkernel
+import nest
+
+__all__ = [
+    'authors',
+    'get_argv',
+    'get_verbosity',
+    'help',
+    'helpdesk',
+    'message',
+    'set_verbosity',
+    'sysinfo',
+]
+
+
+def sysinfo():
+    """Print information on the platform on which NEST was compiled.
+
+    """
+
+    sr("sysinfo")
+
+
+def authors():
+    """Print the authors of NEST.
+ + """ + + sr("authors") + + +def helpdesk(): + """Open the NEST documentation index in a browser. + + This command opens the NEST documentation index page using the + system's default browser. + + Please note that the help pages will only be available if you ran + ``make html`` prior to installing NEST. For more details, see + :ref:`doc_workflow`. + + """ + + docdir = sli_func("statusdict/prgdocdir ::") + help_fname = os.path.join(docdir, 'html', 'index.html') + + if not os.path.isfile(help_fname): + msg = "Sorry, the help index cannot be opened. " + msg += "Did you run 'make html' before running 'make install'?" + raise FileNotFoundError(msg) + + webbrowser.open_new(f"file://{help_fname}") + + +def help(obj=None, return_text=False): + """Display the help page for the given object in a pager. + + If ``return_text`` is omitted or explicitly given as ``False``, + this command opens the help text for ``object`` in the default + pager using the ``pydoc`` module. + + If ``return_text`` is ``True``, the help text is returned as a + string in reStructuredText format instead of displaying it. + + Parameters + ---------- + obj : object, optional + Object to display help for + return_text : bool, optional + Option for returning the help text + + Returns + ------- + None or str + The help text of the object if `return_text` is `True`. + + """ + + if obj is not None: + try: + if return_text: + return load_help(obj) + else: + show_help_with_pager(obj) + except FileNotFoundError: + print(textwrap.dedent(f""" + Sorry, there is no help for model '{obj}'. + Use the Python help() function to obtain help on PyNEST functions.""")) + else: + print(nest.__doc__) + + +def get_argv(): + """Return argv as seen by NEST. + + This is similar to Python :code:`sys.argv` but might have changed after + MPI initialization. + + Returns + ------- + tuple + Argv, as seen by NEST + + """ + + sr('statusdict') + statusdict = spp() + return statusdict['argv'] + + +def message(level, sender, text): + """Print a message using message system of NEST. + + Parameters + ---------- + level : + Level + sender : + Message sender + text : str + Text to be sent in the message + + """ + + sps(level) + sps(sender) + sps(text) + sr('message') + + +def get_verbosity(): + """Return verbosity level of NEST's messages. + + - M_ALL=0, display all messages + - M_INFO=10, display information messages and above + - M_DEPRECATED=18, display deprecation warnings and above + - M_WARNING=20, display warning messages and above + - M_ERROR=30, display error messages and above + - M_FATAL=40, display failure messages and above + + Returns + ------- + int: + The current verbosity level + """ + + sr('verbosity') + return spp() + + +def set_verbosity(level): + """Change verbosity level for NEST's messages. + + - M_ALL=0, display all messages + - M_INFO=10, display information messages and above + - M_DEPRECATED=18, display deprecation warnings and above + - M_WARNING=20, display warning messages and above + - M_ERROR=30, display error messages and above + - M_FATAL=40, display failure messages and above + + .. note:: + + To suppress the usual output when NEST starts up (e.g., the welcome message and + version information), you can run ``export PYNEST_QUIET=1`` on the command + line before executing your simulation script. + + Parameters + ---------- + level : str, default: 'M_INFO' + Can be one of 'M_FATAL', 'M_ERROR', 'M_WARNING', 'M_DEPRECATED', + 'M_INFO' or 'M_ALL'. 
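+
+    For illustration (note that the call is currently a no-op, see the TODO
+    below)::
+
+        nest.set_verbosity('M_WARNING')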
+ """ + + # TODO-PYNEST-NG: There are no SLI messages anymore, so verbosity + # is now irrelevant and should be replaced when a + # replacement for message() exists. + + # sr("{} setverbosity".format(level)) + pass diff --git a/pynest/nest/lib/_hl_api_models.py b/pynest/nest/lib/_hl_api_models.py new file mode 100644 index 0000000000..ab7c324b6d --- /dev/null +++ b/pynest/nest/lib/_hl_api_models.py @@ -0,0 +1,198 @@ +# -*- coding: utf-8 -*- +# +# _hl_api_models.py +# +# This file is part of NEST. +# +# Copyright (C) 2004 The NEST Initiative +# +# NEST is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 2 of the License, or +# (at your option) any later version. +# +# NEST is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with NEST. If not, see . + +""" +Functions for model handling +""" + +from ..ll_api import * +from .. import nestkernel_api as nestkernel +from ._hl_api_helper import deprecated, is_iterable, model_deprecation_warning +from ._hl_api_types import to_json + +__all__ = [ + 'ConnectionRules', + 'CopyModel', + 'GetDefaults', + 'Models', + 'SetDefaults', +] + + +@deprecated("nest.node_models or nest.synapse_models") +@check_stack +def Models(mtype="all", sel=None): + """Return a tuple of neuron, device, or synapse model names. + + Parameters + ---------- + mtype : str, optional + Use ``mtype='nodes'`` to only get neuron and device models, + or ``mtype='synapses'`` to only get synapse models. + sel : str, optional + Filter results and only return models containing ``sel``. + + Returns + ------- + tuple + Available model names, sorted by name + + Raises + ------ + ValueError + Description + + Notes + ----- + - Synapse model names ending in ``_hpc`` require less memory because of + thread-local indices for target neuron IDs and fixed ``rport``s of 0. + - Synapse model names ending in ``_lbl`` allow to assign an integer label + (``synapse_label``) to each individual synapse, at the cost of increased + memory requirements. + + """ + + if mtype not in ("all", "nodes", "synapses"): + raise ValueError("mtype has to be one of 'all', 'nodes', or 'synapses'") + + models = [] + + if mtype in ("all", "nodes"): + models += GetKernelStatus('node_models') + + if mtype in ("all", "synapses"): + models += GetKernelStatus('synapse_models') + + if sel is not None: + models = [x for x in models if sel in x] + + models.sort() + + return tuple(models) + + +@deprecated("nest.connection_rules") +@check_stack +def ConnectionRules(): + """Return a tuple of all available connection rules, sorted by name. + + Returns + ------- + tuple + Available connection rules, sorted by name + + """ + + return tuple(sorted(GetKernelStatus('connection_rules'))) + + +@check_stack +def SetDefaults(model, params, val=None): + """Set defaults for the given model or recording backend. + + New default values are used for all subsequently created instances + of the model. + + Parameters + ---------- + model : str + Name of the model or recording backend + params : str or dict + Dictionary of new default parameter values + val : str, optional + If given, ``params`` has to be the name of a parameter. 
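+
+    For illustration, the two calls below are equivalent (model and value
+    hypothetical)::
+
+        nest.SetDefaults('iaf_psc_alpha', {'V_th': -55.0})
+        nest.SetDefaults('iaf_psc_alpha', 'V_th', -55.0)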
+
+    """
+
+    if val is not None:
+        if isinstance(params, str):
+            params = {params: val}
+
+    nestkernel.llapi_set_defaults(model, params)
+
+
+@check_stack
+def GetDefaults(model, keys=None, output=''):
+    """Return defaults of the given model or recording backend.
+
+    Parameters
+    ----------
+    model : str
+        Name of the model or recording backend
+    keys : str or list, optional
+        String or a list of strings naming model properties. `GetDefaults` then
+        returns a single value or a list of values belonging to the keys
+        given.
+    output : str, optional
+        If set to ``'json'``, the returned data is serialized to a JSON
+        string. Default is ''.
+
+    Returns
+    -------
+    dict
+        A dictionary of default parameters.
+    type
+        If keys is a string, the corresponding default parameter is returned.
+    list
+        If keys is a list of strings, a list of corresponding default parameters
+        is returned.
+    str :
+        If `output` is ``json``, returns parameters in JSON format.
+
+    Raises
+    ------
+    TypeError
+
+    """
+
+    result = nestkernel.llapi_get_defaults(model)
+
+    if keys is not None:
+        if is_iterable(keys) and not isinstance(keys, str):
+            result = [result[key] for key in keys]
+        else:
+            result = result[keys]
+
+    if output == 'json':
+        result = to_json(result)
+
+    return result
+
+
+@check_stack
+def CopyModel(existing, new, params=None):
+    """Create a new model by copying an existing one.
+
+    Parameters
+    ----------
+    existing : str
+        Name of existing model
+    new : str
+        Name of the copied model
+    params : dict, optional
+        Default parameters assigned to the copy. Parameters not provided are
+        taken from the existing model.
+
+    """
+
+    model_deprecation_warning(existing)
+
+    nestkernel.llapi_copy_model(existing, new, {} if params is None else params)
diff --git a/pynest/nest/lib/_hl_api_nodes.py b/pynest/nest/lib/_hl_api_nodes.py
new file mode 100644
index 0000000000..2a4642a0a5
--- /dev/null
+++ b/pynest/nest/lib/_hl_api_nodes.py
@@ -0,0 +1,203 @@
+# -*- coding: utf-8 -*-
+#
+# _hl_api_nodes.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Functions for node handling
+"""
+
+import warnings
+
+import nest
+from ..ll_api import *
+from .. import pynestkernel as kernel
+from .. import nestkernel_api as nestkernel
+from ._hl_api_helper import is_iterable, model_deprecation_warning
+from ._hl_api_parallel_computing import NumProcesses, Rank
+from ._hl_api_types import NodeCollection, Parameter
+
+__all__ = [
+    'Create',
+    'GetLocalNodeCollection',
+    'GetNodes',
+    'PrintNodes',
+]
+
+
+def Create(model, n=1, params=None, positions=None):
+    """Create one or more nodes.
+
+    Generates `n` new network objects of the supplied model type. If `n` is not
+    given, a single node is created. Note that if setting parameters of the
+    nodes fails, the nodes will still have been created.
+
+    Note
+    ----
+    During network construction, create all nodes representing model neurons first, then all nodes
+    representing devices (generators, recorders, or detectors), or all devices first and then all neurons.
+    Otherwise, connecting the network can be slow, especially in parallel simulations of networks
+    with many devices.
+
+    Parameters
+    ----------
+    model : str
+        Name of the model to create
+    n : int, optional
+        Number of nodes to create
+    params : dict or list, optional
+        Parameters for the new nodes. Can be any of the following:
+
+        - A dictionary with either single values or lists of size n.
+          The single values will be applied to all nodes, while the lists will be distributed across
+          the nodes. Both single values and lists can be given at the same time.
+        - A list with n dictionaries, one dictionary for each node.
+          Values may be :py:class:`.Parameter` objects. If omitted,
+          the model's defaults are used.
+    positions: :py:class:`.spatial.grid` or :py:class:`.spatial.free` object, optional
+        Object describing spatial positions of the nodes. If omitted, the nodes have no spatial attachment.
+
+    Returns
+    -------
+    NodeCollection:
+        Object representing the IDs of created nodes, see :py:class:`.NodeCollection` for more.
+
+    Raises
+    ------
+    NESTError
+        If setting node parameters fails. However, the nodes will still have
+        been created.
+    TypeError
+        If the positions object is of wrong type.
+    """
+
+    model_deprecation_warning(model)
+
+    # If any of the elements in the parameter dictionary is either an array-like object,
+    # or a NEST parameter, we create the nodes first, then set the given values. If not,
+    # we can pass the parameter specification to SLI when the nodes are created.
+    iterable_or_parameter_in_params = True
+
+    if not isinstance(n, int):
+        raise TypeError('n must be an integer')
+
+    # PYNEST-NG: can we support the use case above by passing the dict into ll_create?
+    if isinstance(params, dict) and params:  # if params is a dict and not empty
+        iterable_or_parameter_in_params = any(is_iterable(v) or isinstance(v, Parameter) for k, v in params.items())
+
+    if positions is not None:
+        # Explicitly retrieve lazy loaded spatial property from the module class.
+        # This is needed because the automatic lookup fails. See #2135.
+        spatial = getattr(nest.NestModule, "spatial")
+        # We only accept positions as either a free object or a grid object.
+        if not isinstance(positions, (spatial.free, spatial.grid)):
+            raise TypeError('`positions` must be either a nest.spatial.free or a nest.spatial.grid object')
+        layer_specs = {'elements': model}
+        layer_specs['edge_wrap'] = positions.edge_wrap
+        if isinstance(positions, spatial.free):
+            layer_specs['positions'] = positions.pos
+            # If the positions are based on a parameter object, the number of nodes must be specified.
+            if isinstance(positions.pos, Parameter):
+                layer_specs['n'] = n
+        else:
+            # If positions is not a free object, it must be a grid object.
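+            # Illustrative sketch (hypothetical values): a grid such as
+            # nest.spatial.grid(shape=[3, 3]) already encodes the number of
+            # nodes to create (3 * 3 = 9), so an explicit n > 1 would
+            # conflict with the grid shape.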
+            if n > 1:
+                raise kernel.NESTError('Cannot specify number of nodes with grid positions')
+            layer_specs['shape'] = positions.shape
+            if positions.center is not None:
+                layer_specs['center'] = positions.center
+            if positions.extent is not None:
+                layer_specs['extent'] = positions.extent
+
+        layer = nestkernel.llapi_create_spatial(layer_specs)
+        layer.set(params if params else {})
+        return layer
+
+    node_ids = nestkernel.llapi_create(model, n)
+
+    if isinstance(params, dict) and params:  # if params is a dict and not empty
+        try:
+            node_ids.set(params)
+        except Exception:
+            warnings.warn("Setting node parameters failed, but nodes have already been " +
+                          f"created! The node IDs of the new nodes are: {node_ids}.")
+            raise
+
+    return node_ids
+
+
+def PrintNodes():
+    """Print the `node ID` ranges and `model names` of all the nodes in the network."""
+
+    print(nestkernel.llapi_print_nodes())
+
+
+def GetNodes(properties={}, local_only=False):
+    """Return all nodes with the given properties as `NodeCollection`.
+
+    Parameters
+    ----------
+    properties : dict, optional
+        Only node IDs of nodes matching the properties given in the
+        dictionary exactly will be returned. Matching properties with float
+        values (e.g. the membrane potential) may fail due to tiny numerical
+        discrepancies and should be avoided. Note that when a params dict is
+        present, thread parallelization is not possible; the function then
+        runs serially on a single thread.
+    local_only : bool, optional
+        If True, only node IDs of nodes simulated on the local MPI process will
+        be returned. By default, node IDs of nodes in the entire simulation
+        will be returned. This requires MPI communication and may slow down
+        the script.
+
+    Returns
+    -------
+    NodeCollection:
+        `NodeCollection` of nodes
+    """
+
+    return nestkernel.llapi_get_nodes(properties, local_only)
+
+
+def GetLocalNodeCollection(nc):
+    """Get local nodes of a `NodeCollection` as a new `NodeCollection`.
+
+    This function returns the local nodes of a `NodeCollection`. If there are no
+    local elements, an empty `NodeCollection` is returned.
+
+    Parameters
+    ----------
+    nc: NodeCollection
+        `NodeCollection` for which to get local nodes
+
+    Returns
+    -------
+    NodeCollection:
+        Object representing the local nodes of the given `NodeCollection`
+    """
+    if not isinstance(nc, NodeCollection):
+        raise TypeError("GetLocalNodeCollection requires a NodeCollection in order to run")
+
+    rank = Rank()
+    num_procs = NumProcesses()
+    first_in_nc = nc[0].global_id
+    first_index = ((rank - first_in_nc % num_procs) + num_procs) % num_procs
+    if first_index <= len(nc):
+        return nc[first_index:len(nc):num_procs]
+    else:
+        return NodeCollection([])
diff --git a/pynest/nest/lib/_hl_api_parallel_computing.py b/pynest/nest/lib/_hl_api_parallel_computing.py
new file mode 100644
index 0000000000..a519171380
--- /dev/null
+++ b/pynest/nest/lib/_hl_api_parallel_computing.py
@@ -0,0 +1,127 @@
+# -*- coding: utf-8 -*-
+#
+# _hl_api_parallel_computing.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Functions for parallel computing
+"""
+
+from ..ll_api import *
+from .. import pynestkernel as kernel
+from .. import nestkernel_api as nestkernel
+
+__all__ = [
+    'NumProcesses',
+    'Rank',
+    'GetLocalVPs',
+    'SetAcceptableLatency',
+    'SetMaxBuffered',
+    'SyncProcesses',
+]
+
+
+@check_stack
+def Rank():
+    """Return the MPI rank of the local process.
+
+    Returns
+    -------
+    int:
+        MPI rank of the local process
+
+    Note
+    ----
+    DO NOT USE `Rank()` TO EXECUTE ANY FUNCTION IMPORTED FROM THE `nest`
+    MODULE ON A SUBSET OF RANKS IN AN MPI-PARALLEL SIMULATION.
+
+    This will lead to unpredictable behavior. Symptoms may be an
+    error message about non-synchronous global random number generators
+    or deadlocks during simulation. In the worst case, the simulation
+    may complete but generate nonsensical results.
+    """
+
+    return nestkernel.llapi_get_rank()
+
+
+@check_stack
+def NumProcesses():
+    """Return the overall number of MPI processes.
+
+    Returns
+    -------
+    int:
+        Number of overall MPI processes
+    """
+
+    return nestkernel.llapi_get_num_mpi_processes()
+
+
+@check_stack
+def SetAcceptableLatency(port_name, latency):
+    """Set the acceptable `latency` (in ms) for a MUSIC port.
+
+    Parameters
+    ----------
+    port_name : str
+        MUSIC port to set latency for
+    latency : float
+        Latency in ms
+    """
+
+    sps(kernel.SLILiteral(port_name))
+    sps(latency)
+    sr("SetAcceptableLatency")
+
+
+@check_stack
+def SetMaxBuffered(port_name, size):
+    """Set the maximum buffer size for a MUSIC port.
+
+    Parameters
+    ----------
+    port_name : str
+        MUSIC port to set buffer size for
+    size : int
+        Buffer size
+    """
+
+    sps(kernel.SLILiteral(port_name))
+    sps(size)
+    sr("SetMaxBuffered")
+
+
+@check_stack
+def SyncProcesses():
+    """Synchronize all MPI processes.
+    """
+
+    sr("SyncProcesses")
+
+
+@check_stack
+def GetLocalVPs():
+    """Return iterable representing the VPs local to the MPI rank.
+    """
+
+    # Compute local VPs as range based on round-robin logic in
+    # VPManager::get_vp(). mpitest_get_local_vps ensures this is in
+    # sync with the kernel.
+    n_vp = sli_func("GetKernelStatus /total_num_virtual_procs get")
+    return range(Rank(), n_vp, NumProcesses())
diff --git a/pynest/nest/lib/_hl_api_simulation.py b/pynest/nest/lib/_hl_api_simulation.py
new file mode 100644
index 0000000000..59fc043c7f
--- /dev/null
+++ b/pynest/nest/lib/_hl_api_simulation.py
@@ -0,0 +1,342 @@
+# -*- coding: utf-8 -*-
+#
+# _hl_api_simulation.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Functions for simulation control
+"""
+
+from contextlib import contextmanager
+import warnings
+
+from .. import pynestkernel as kernel
+from .. import nestkernel_api as nestkernel
+
+from ..ll_api import *
+from ._hl_api_helper import is_iterable
+from ._hl_api_parallel_computing import Rank
+
+__all__ = [
+    'Cleanup',
+    'DisableStructuralPlasticity',
+    'EnableStructuralPlasticity',
+    'GetKernelStatus',
+    'Install',
+    'Prepare',
+    'ResetKernel',
+    'Run',
+    'RunManager',
+    'SetKernelStatus',
+    'Simulate',
+]
+
+
+@check_stack
+def Simulate(t):
+    """Simulate the network for `t` milliseconds.
+
+    Parameters
+    ----------
+    t : float
+        Time to simulate in ms
+
+    See Also
+    --------
+    RunManager
+
+    """
+
+    nestkernel.llapi_simulate(t)
+
+
+@check_stack
+def Run(t):
+    """Simulate the network for `t` milliseconds.
+
+    Parameters
+    ----------
+    t : float
+        Time to simulate in ms
+
+    Notes
+    -----
+
+    Call between `Prepare` and `Cleanup` calls, or within a
+    ``with RunManager`` clause. ``Simulate(t)`` is equivalent to the
+    following, for any positive integer ``m``::
+
+        t_ = t / m
+        Prepare()
+        for _ in range(m):
+            Run(t_)
+        Cleanup()
+
+    `Prepare` must be called before `Run` to calibrate the system, and
+    `Cleanup` must be called after `Run` to close files, cleanup handles, and
+    so on. After `Cleanup`, `Prepare` can and must be called before more `Run`
+    calls.
+
+    Be careful about modifying the network or neurons between `Prepare` and `Cleanup`
+    calls. In particular, do not call `Create`, `Connect`, or `SetKernelStatus`.
+    Changing the membrane potential `V_m` of neurons or synaptic weights (but not delays!)
+    will in most cases work as expected, while changing membrane or synaptic time
+    constants will not work correctly. If in doubt, assume that changes may cause
+    undefined behavior and check these thoroughly.
+
+    Also note that `local_spike_counter` is reset each time you call `Run`.
+
+    See Also
+    --------
+    Prepare, Cleanup, RunManager, Simulate
+
+    """
+    nestkernel.llapi_run(t)
+
+
+@check_stack
+def Prepare():
+    """Calibrate the system before a `Run` call. Not needed for `Simulate`.
+
+    See Also
+    --------
+    Run, Cleanup
+
+    """
+    nestkernel.llapi_prepare()
+
+
+@check_stack
+def Cleanup():
+    """Clean up resources after a `Run` call. Not needed for `Simulate`.
+
+    Closes state for a series of runs, such as flushing and closing files.
+    A `Prepare` is needed after a `Cleanup` before any more calls to `Run`.
+
+    See Also
+    --------
+    Run, Prepare
+
+    """
+    nestkernel.llapi_cleanup()
+
+
+@contextmanager
+def RunManager():
+    """ContextManager for `Run`
+
+    Calls `Prepare` before a series of `Run` calls, and calls `Cleanup` at end.
+
+    For example:
+
+    ::
+
+        with RunManager():
+            for _ in range(10):
+                Run(100)
+            # extract results
+
+    Notes
+    -----
+    Be careful about modifying the network or neurons between `Prepare` and `Cleanup`
+    calls. In particular, do not call `Create`, `Connect`, or `SetKernelStatus`.
+    Changing the membrane potential `V_m` of neurons or synaptic weights (but not delays!)
+    will in most cases work as expected, while changing membrane or synaptic time
+    constants will not work correctly. If in doubt, assume that changes may cause
+    undefined behavior and check these thoroughly.
+
+    See Also
+    --------
+    Prepare, Run, Cleanup, Simulate
+
+    """
+
+    Prepare()
+    try:
+        yield
+    finally:
+        Cleanup()
+
+
+@check_stack
+def ResetKernel():
+    """Reset the simulation kernel.
+
+    This will destroy the network as well as all custom models created with
+    :py:func:`.CopyModel`. Calling this function is equivalent to restarting NEST.
+
+    In particular,
+
+    * all network nodes
+    * all connections
+    * all user-defined neuron and synapse models
+
+    are deleted, and
+
+    * time
+    * random generators
+
+    are reset. The only exception is that dynamically loaded modules are not
+    unloaded. This may change in a future version of NEST.
+
+    """
+    nestkernel.llapi_reset_kernel()
+
+
+@check_stack
+def SetKernelStatus(params):
+    """Set parameters for the simulation kernel.
+
+    See the documentation of :ref:`sec:kernel_attributes` for a valid
+    list of parameters.
+
+    Parameters
+    ----------
+
+    params : dict
+        Dictionary of parameters to set.
+
+    See Also
+    --------
+
+    GetKernelStatus
+
+    """
+    # We need the nest module to be fully initialized in order to access the
+    # _kernel_attr_names and _readonly_kernel_attrs. As hl_api_simulation is
+    # imported during nest module initialization, we can't put the import on
+    # the module level, but have to have it on the function level.
+    import nest  # noqa
+    # TODO-PYNEST-NG: Enable again when KernelAttribute works
+    raise_errors = params.get('dict_miss_is_error', nest.dict_miss_is_error)
+    valids = nest._kernel_attr_names
+    readonly = nest._readonly_kernel_attrs
+    keys = list(params.keys())
+    for key in keys:
+        msg = None
+        if key not in valids:
+            msg = f'`{key}` is not a valid kernel parameter, ' + \
+                  'valid parameters are: ' + \
+                  ', '.join(f"'{p}'" for p in sorted(valids))
+        elif key in readonly:
+            msg = f'`{key}` is a readonly kernel parameter'
+        if msg is not None:
+            if raise_errors:
+                raise ValueError(msg)
+            else:
+                warnings.warn(msg + f' \n`{key}` has been ignored')
+                del params[key]
+
+    nestkernel.llapi_set_kernel_status(params)
+
+
+@check_stack
+def GetKernelStatus(keys=None):
+    """Obtain parameters of the simulation kernel.
+
+    Parameters
+    ----------
+
+    keys : str or list, optional
+        Single parameter name or `list` of parameter names
+
+    Returns
+    -------
+
+    dict:
+        Parameter dictionary, if called without argument
+    type:
+        Single parameter value, if called with single parameter name
+    list:
+        List of parameter values, if called with list of parameter names
+
+    Raises
+    ------
+
+    TypeError
+        If `keys` is of the wrong type.
+
+    Notes
+    -----
+    See SetKernelStatus for documentation on each parameter key.
+
+    See Also
+    --------
+    SetKernelStatus
+
+    """
+
+    status_root = nestkernel.llapi_get_kernel_status()
+
+    if keys is None:
+        return status_root
+    elif isinstance(keys, str):
+        return status_root[keys]
+    elif is_iterable(keys):
+        return tuple(status_root[k] for k in keys)
+    else:
+        raise TypeError("keys should be either a string or an iterable")
+
+
+@check_stack
+def Install(module_name):
+    """Load a dynamically linked NEST module.
+
+    Parameters
+    ----------
+    module_name : str
+        Name of the dynamically linked module
+
+    Returns
+    -------
+    handle
+        NEST module identifier, required for unloading
+
+    Notes
+    -----
+    Dynamically linked modules are searched in the NEST library
+    directory (``<prefix>/lib/nest``) and in ``LD_LIBRARY_PATH`` (on
+    Linux) or ``DYLD_LIBRARY_PATH`` (on OSX).
+
+    **Example**
+    ::
+
+        nest.Install("mymodule")
+
+    """
+
+    return sr("(%s) Install" % module_name)
+
+
+@check_stack
+def EnableStructuralPlasticity():
+    """Enable structural plasticity for the network simulation.
+
+    See Also
+    --------
+    DisableStructuralPlasticity
+
+    """
+
+    sr('EnableStructuralPlasticity')
+
+
+@check_stack
+def DisableStructuralPlasticity():
+    """Disable structural plasticity for the network simulation.
+
+    See Also
+    --------
+    EnableStructuralPlasticity
+
+    """
+    sr('DisableStructuralPlasticity')
diff --git a/pynest/nest/lib/_hl_api_spatial.py b/pynest/nest/lib/_hl_api_spatial.py
new file mode 100644
index 0000000000..6d474efb7c
--- /dev/null
+++ b/pynest/nest/lib/_hl_api_spatial.py
@@ -0,0 +1,1607 @@
+# -*- coding: utf-8 -*-
+#
+# _hl_api_spatial.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Functions relating to spatial properties of nodes
+"""
+
+
+import numpy as np
+
+from .. import pynestkernel as kernel
+from .. import nestkernel_api as nestkernel
+from ._hl_api_helper import is_iterable
+from ._hl_api_connections import GetConnections
+from ._hl_api_parallel_computing import NumProcesses, Rank
+from ._hl_api_types import NodeCollection
+
+try:
+    import matplotlib as mpl
+    import matplotlib.path as mpath
+    import matplotlib.patches as mpatches
+    HAVE_MPL = True
+except ImportError:
+    HAVE_MPL = False
+
+__all__ = [
+    'CreateMask',
+    'Displacement',
+    'Distance',
+    'DumpLayerConnections',
+    'DumpLayerNodes',
+    'FindCenterElement',
+    'FindNearestElement',
+    'GetPosition',
+    'GetTargetNodes',
+    'GetSourceNodes',
+    'GetTargetPositions',
+    'GetSourcePositions',
+    'PlotLayer',
+    'PlotProbabilityParameter',
+    'PlotTargets',
+    'PlotSources',
+    'SelectNodesByMask',
+]
+
+
+def CreateMask(masktype, specs, anchor=None):
+    """
+    Create a spatial mask for connections.
+
+    Masks are used when creating connections. A mask describes the area of
+    the pool population that is searched for nodes to connect for any given
+    node in the driver population. Several mask types are available. Examples
+    are the grid, rectangular, circular, and doughnut regions.
+
+    The command :py:func:`.CreateMask` creates a `Mask` object which may be combined
+    with other `Mask` objects using Boolean operators. The mask is specified
+    in a dictionary.
+
+    ``Mask`` objects can be passed to :py:func:`.Connect` in a connection dictionary with the key `'mask'`.
+
+    Parameters
+    ----------
+    masktype : str, ['rectangular' | 'circular' | 'doughnut' | 'elliptical']
+        for 2D masks, ['box' | 'spherical' | 'ellipsoidal'] for 3D masks,
+        ['grid'] only for grid-based layers in 2D.
+        The mask name corresponds to the geometrical shape of the mask. There
+        are different types for 2- and 3-dimensional layers.
+    specs : dict
+        Dictionary specifying the parameters of the provided `masktype`,
+        see **Mask types**.
+    anchor : [tuple/list of floats | dict with the keys `'column'` and \
+        `'row'` (for grid masks only)], optional, default: None
+        By providing anchor coordinates, the location of the mask relative to
+        the driver node can be changed. The list of coordinates has a length
+        of 2 or 3 depending on the number of dimensions.
+
+    Returns
+    -------
+    Mask:
+        Object representing the mask
+
+    See also
+    --------
+    Connect
+
+    Notes
+    -----
+    - All angles must be given in degrees.
+
+    **Mask types**
+
+    Available mask types (`masktype`) and their corresponding parameter
+    dictionaries:
+
+    * 2D free and grid-based layers
+      ::
+
+          'rectangular' :
+              {'lower_left'   : [float, float],
+               'upper_right'  : [float, float],
+               'azimuth_angle': float}  # default: 0.0
+          #or
+          'circular' :
+              {'radius' : float}
+          #or
+          'doughnut' :
+              {'inner_radius' : float,
+               'outer_radius' : float}
+          #or
+          'elliptical' :
+              {'major_axis' : float,
+               'minor_axis' : float,
+               'azimuth_angle' : float,    # default: 0.0
+               'anchor' : [float, float]}  # default: [0.0, 0.0]
+
+    * 3D free and grid-based layers
+      ::
+
+          'box' :
+              {'lower_left'  : [float, float, float],
+               'upper_right' : [float, float, float],
+               'azimuth_angle': float,  # default: 0.0
+               'polar_angle'  : float}  # default: 0.0
+          #or
+          'spherical' :
+              {'radius' : float}
+          #or
+          'ellipsoidal' :
+              {'major_axis' : float,
+               'minor_axis' : float,
+               'polar_axis' : float,
+               'azimuth_angle' : float,           # default: 0.0
+               'polar_angle' : float,             # default: 0.0
+               'anchor' : [float, float, float]}  # default: [0.0, 0.0, 0.0]
+
+    * 2D grid-based layers only
+      ::
+
+          'grid' :
+              {'rows' : float,
+               'columns' : float}
+
+      By default the top-left corner of a grid mask, i.e., the grid
+      mask element with grid index [0, 0], is aligned with the driver
+      node. It can be changed by means of the 'anchor' parameter:
+      ::
+
+          'anchor' :
+              {'row' : float,
+               'column' : float}
+
+    **Example**
+    ::
+
+        import nest
+
+        # create a grid-based layer
+        l = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[5, 5]))
+
+        # create a circular mask
+        m = nest.CreateMask('circular', {'radius': 0.2})
+
+        # connectivity specifications
+        conndict = {'rule': 'pairwise_bernoulli',
+                    'p': 1.0,
+                    'mask': m}
+
+        # connect layer l with itself according to the specifications
+        nest.Connect(l, l, conndict)
+    """
+    if anchor is None:
+        return sli_func('CreateMask', {masktype: specs})
+    else:
+        return sli_func('CreateMask',
+                        {masktype: specs, 'anchor': anchor})
+
+
+def GetPosition(nodes):
+    """
+    Return the spatial locations of nodes.
+
+    Parameters
+    ----------
+    nodes : NodeCollection
+        `NodeCollection` of nodes for which to return positions
+
+    Returns
+    -------
+    tuple or tuple of tuple(s):
+        Tuple with a 2- or 3-element position for a single node, or a tuple of
+        such position tuples for multiple nodes
+
+    See also
+    --------
+    Displacement: Get vector of lateral displacement between nodes.
+    Distance: Get lateral distance between nodes.
+    DumpLayerConnections: Write connectivity information to file.
+    DumpLayerNodes: Write node positions to file.
+
+    Notes
+    -----
+    - The functions :py:func:`.GetPosition`, :py:func:`.Displacement` and :py:func:`.Distance`
+      only work for nodes local to the current MPI process when used in an
+      MPI-parallel simulation.
+
+    Example
+    -------
+    ::
+
+        import nest
+
+        # Reset kernel
+        nest.ResetKernel()
+
+        # create a NodeCollection with spatial extent
+        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[5, 5]))
+
+        # retrieve positions of all (local) nodes belonging to the population
+        pos = nest.GetPosition(s_nodes)
+
+        # retrieve positions of the first node in the NodeCollection
+        pos = nest.GetPosition(s_nodes[0])
+
+        # retrieve positions of a subset of nodes in the population
+        pos = nest.GetPosition(s_nodes[2:18])
+    """
+    if not isinstance(nodes, NodeCollection):
+        raise TypeError("nodes must be a NodeCollection with spatial extent")
+
+    return nestkernel.llapi_get_position(nodes._datum)
+
+
+def Displacement(from_arg, to_arg):
+    """
+    Get vector of lateral displacement from node(s)/Position(s) `from_arg`
+    to node(s) `to_arg`.
+
+    Displacement is the shortest displacement, taking into account
+    periodic boundary conditions where applicable. If explicit positions
+    are given in the `from_arg` list, they are interpreted in the `to_arg`
+    population.
+
+    - If one of `from_arg` or `to_arg` has length 1, and the other is longer,
+      the displacement from/to the single item to all other items is given.
+    - If `from_arg` and `to_arg` both have more than one element, they have
+      to be of the same length and the displacement between each
+      pair is returned.
+
+    Parameters
+    ----------
+    from_arg : NodeCollection or tuple/list with tuple(s)/list(s) of floats
+        `NodeCollection` of node IDs or tuple/list of position(s)
+    to_arg : NodeCollection
+        `NodeCollection` of node IDs
+
+    Returns
+    -------
+    tuple:
+        Displacement vectors between pairs of nodes in `from_arg` and `to_arg`
+
+    See also
+    --------
+    Distance: Get lateral distances between nodes.
+    DumpLayerConnections: Write connectivity information to file.
+    GetPosition: Return the spatial locations of nodes.
+
+    Notes
+    -----
+    - The functions :py:func:`.GetPosition`, :py:func:`.Displacement` and :py:func:`.Distance`
+      only work for nodes local to the current MPI process when used in an
+      MPI-parallel simulation.
+
+    **Example**
+    ::
+
+        import nest
+
+        # create a spatial population
+        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[5, 5]))
+
+        # displacement between node 2 and 3
+        print(nest.Displacement(s_nodes[1], s_nodes[2]))
+
+        # displacement between the position (0.0, 0.0) and node 2
+        print(nest.Displacement([(0.0, 0.0)], s_nodes[1]))
+    """
+    if not isinstance(to_arg, NodeCollection):
+        raise TypeError("to_arg must be a NodeCollection")
+
+    if isinstance(from_arg, np.ndarray):
+        from_arg = (from_arg, )
+
+    if (len(from_arg) > 1 and len(to_arg) > 1 and not
+            len(from_arg) == len(to_arg)):
+        raise ValueError("to_arg and from_arg must have same size unless one has size 1.")
+
+    return sli_func('Displacement', from_arg, to_arg)
+
+
+def Distance(from_arg, to_arg):
+    """
+    Get lateral distances from node(s)/position(s) `from_arg` to node(s) `to_arg`.
+
+    The distance between two nodes is the length of their displacement.
+
+    If explicit positions are given in the `from_arg` list, they are
+    interpreted in the `to_arg` population. Distance is the shortest distance,
+    taking into account periodic boundary conditions where applicable.
+
+    - If one of `from_arg` or `to_arg` has length 1, and the other is longer,
+      the distance from/to the single item to all other items is given.
+    - If `from_arg` and `to_arg` both have more than one element, they have
+      to be of the same length and the distance for each pair is
+      returned.
+
+    Parameters
+    ----------
+    from_arg : NodeCollection or tuple/list with tuple(s)/list(s) of floats
+        `NodeCollection` of node IDs or tuple/list of position(s)
+    to_arg : NodeCollection
+        `NodeCollection` of node IDs
+
+    Returns
+    -------
+    tuple:
+        Distances between `from_arg` and `to_arg`
+
+    See also
+    --------
+    Displacement: Get vector of lateral displacements between nodes.
+    DumpLayerConnections: Write connectivity information to file.
+    GetPosition: Return the spatial locations of nodes.
+
+    Notes
+    -----
+    - The functions :py:func:`.GetPosition`, :py:func:`.Displacement` and :py:func:`.Distance`
+      only work for nodes local to the current MPI process when used in an
+      MPI-parallel simulation.
+
+    Example
+    -------
+    ::
+
+        import nest
+
+        # create a spatial population
+        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[5, 5]))
+
+        # distance between node 2 and 3
+        print(nest.Distance(s_nodes[1], s_nodes[2]))
+
+        # distance between the position (0.0, 0.0) and node 2
+        print(nest.Distance([(0.0, 0.0)], s_nodes[1]))
+    """
+    if not isinstance(to_arg, NodeCollection):
+        raise TypeError("to_arg must be a NodeCollection")
+
+    if isinstance(from_arg, np.ndarray):
+        from_arg = (from_arg, )
+
+    if (len(from_arg) > 1 and len(to_arg) > 1 and not
+            len(from_arg) == len(to_arg)):
+        raise ValueError("to_arg and from_arg must have same size unless one has size 1.")
+
+    return sli_func('Distance', from_arg, to_arg)
+
+
+def FindNearestElement(layer, locations, find_all=False):
+    """
+    Return the node(s) closest to the `locations` in the given `layer`.
+
+    This function works for fixed-grid layers only.
+
+    * If `locations` is a single 2-element array giving a grid location, return a
+      `NodeCollection` of `layer` elements at the given location.
+    * If `locations` is a list of coordinates, the function returns a list of `NodeCollection` of the nodes at all
+      locations.
+
+    Parameters
+    ----------
+    layer : NodeCollection
+        `NodeCollection` of spatially distributed node IDs
+    locations : tuple(s)/list(s) of tuple(s)/list(s)
+        2-element list with coordinates of a single position, or list of
+        2-element list of positions
+    find_all : bool, default: False
+        If there are several nodes with same minimal distance, return only the
+        first found, if `False`.
+        If `True`, instead of returning a single `NodeCollection`, return a list of `NodeCollection`
+        containing all nodes with minimal distance.
+
+    Returns
+    -------
+    NodeCollection:
+        `NodeCollection` of node IDs if locations is a 2-element list with coordinates of a single position
+    list:
+        list of `NodeCollection` if find_all is True or locations contains more than one position
+
+    See also
+    --------
+    FindCenterElement: Return NodeCollection of node closest to center of layers.
+    GetPosition: Return the spatial locations of nodes.
+
+    Example
+    -------
+    ::
+
+        import nest
+
+        # create a spatial population
+        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[5, 5]))
+
+        # get node ID of element closest to some location
+        nest.FindNearestElement(s_nodes, [3.0, 4.0], True)
+    """
+
+    if not isinstance(layer, NodeCollection):
+        raise TypeError("layer must be a NodeCollection")
+
+    if not len(layer) > 0:
+        raise ValueError("layer cannot be empty")
+
+    if not is_iterable(locations):
+        raise TypeError("locations must be coordinate array or list of coordinate arrays")
+
+    # Ensure locations is sequence, keeps code below simpler
+    if not is_iterable(locations[0]):
+        locations = (locations, )
+
+    result = []
+
+    for loc in locations:
+        d = Distance(np.array(loc), layer)
+
+        if not find_all:
+            dx = np.argmin(d)  # finds location of one minimum
+            result.append(layer[dx])
+        else:
+            minnode = list(layer[:1])
+            minval = d[0]
+            for idx in range(1, len(layer)):
+                if d[idx] < minval:
+                    minnode = [layer[idx]]
+                    minval = d[idx]
+                elif np.abs(d[idx] - minval) <= 1e-14 * minval:
+                    minnode.append(layer[idx])
+            result.append(minnode)
+
+    if len(result) == 1:
+        result = result[0]
+
+    return result
+
+
+def _rank_specific_filename(basename):
+    """Returns file name decorated with rank."""
+
+    if NumProcesses() == 1:
+        return basename
+    else:
+        num_procs = NumProcesses()
+        np_digs = len(str(num_procs - 1))  # for pretty formatting
+        rk = Rank()
+        dot = basename.find('.')
+        if dot < 0:
+            return '%s-%0*d' % (basename, np_digs, rk)
+        else:
+            return '%s-%0*d%s' % (basename[:dot], np_digs, rk, basename[dot:])
+
+
+def DumpLayerNodes(layer, outname):
+    """
+    Write `node ID` and position data of `layer` to file.
+
+    Write `node ID` and position data to file `outname`. For each node in `layer`,
+    a line with the following information is written:
+    ::
+
+        node ID x-position y-position [z-position]
+
+    If `layer` contains several `node IDs`, data for all nodes in `layer` will be written to a
+    single file.
+
+    Parameters
+    ----------
+    layer : NodeCollection
+        `NodeCollection` of spatially distributed node IDs
+    outname : str
+        Name of file to write to (existing files are overwritten)
+
+    See also
+    --------
+    DumpLayerConnections: Write connectivity information to file.
+    GetPosition: Return the spatial locations of nodes.
+
+    Notes
+    -----
+    * If calling this function from a distributed simulation, this function
+      will write to one file per MPI rank.
+    * File names are formed by adding the MPI Rank into the file name before
+      the file name suffix.
+    * Each file stores data for the nodes local to the corresponding MPI rank.
+
+    Example
+    -------
+    ::
+
+        import nest
+
+        # create a spatial population
+        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[5, 5]))
+
+        # write layer node positions to file
+        nest.DumpLayerNodes(s_nodes, 'positions.txt')
+
+    """
+    if not isinstance(layer, NodeCollection):
+        raise TypeError("layer must be a NodeCollection")
+
+    sli_func("""
+             (w) file exch DumpLayerNodes close
+             """,
+             layer, _rank_specific_filename(outname))
+
+
+def DumpLayerConnections(source_layer, target_layer, synapse_model, outname):
+    """
+    Write connectivity information to file.
+
+    This function writes connection information to file for all outgoing
+    connections from the given layers with the given synapse model.
+
+    For each connection, one line is stored in the following format:
+    ::
+
+        source_node_id target_node_id weight delay dx dy [dz]
+
+    where (dx, dy [, dz]) is the displacement from source to target node.
+    If targets do not have positions (e.g., spike recorders outside any layer),
+    NaN is written for each displacement coordinate.
+
+    Parameters
+    ----------
+    source_layer : NodeCollection
+        `NodeCollection` of spatially distributed node IDs
+    target_layer : NodeCollection
+        `NodeCollection` of (spatially distributed) node IDs
+    synapse_model : str
+        NEST synapse model
+    outname : str
+        Name of file to write to (will be overwritten if it exists)
+
+    See also
+    --------
+    DumpLayerNodes: Write layer node positions to file.
+    GetPosition: Return the spatial locations of nodes.
+    GetConnections: Return connection identifiers between
+        sources and targets
+
+    Notes
+    -----
+    * If calling this function from a distributed simulation, this function
+      will write to one file per MPI rank.
+    * File names are formed by inserting
+      the MPI Rank into the file name before the file name suffix.
+    * Each file stores data for local nodes.
+
+    **Example**
+    ::
+
+        import nest
+
+        # create a spatial population
+        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[5, 5]))
+
+        nest.Connect(s_nodes, s_nodes,
+                     {'rule': 'pairwise_bernoulli', 'p': 1.0},
+                     {'synapse_model': 'static_synapse'})
+
+        # write connectivity information to file
+        nest.DumpLayerConnections(s_nodes, s_nodes, 'static_synapse', 'conns.txt')
+    """
+    if not isinstance(source_layer, NodeCollection):
+        raise TypeError("source_layer must be a NodeCollection")
+    if not isinstance(target_layer, NodeCollection):
+        raise TypeError("target_layer must be a NodeCollection")
+
+    sli_func("""
+             /oname  Set
+             cvlit /synmod Set
+             /lyr_target Set
+             /lyr_source Set
+             oname (w) file lyr_source lyr_target synmod
+             DumpLayerConnections close
+             """,
+             source_layer, target_layer, synapse_model,
+             _rank_specific_filename(outname))
+
+
+def FindCenterElement(layer):
+    """
+    Return `NodeCollection` of node closest to center of `layer`.
+
+    Parameters
+    ----------
+    layer : NodeCollection
+        `NodeCollection` with spatially distributed node IDs
+
+    Returns
+    -------
+    NodeCollection:
+        `NodeCollection` of the node closest to the center of the `layer`, as specified by `layer`
+        parameters given in ``layer.spatial``. If several nodes are equally close to the center,
+        an arbitrary one of them is returned.
+
+    See also
+    --------
+    FindNearestElement: Return the node(s) closest to the location(s) in the given `layer`.
+    GetPosition: Return the spatial locations of nodes.
+
+    Example
+    -------
+    ::
+
+        import nest
+
+        # create a spatial population
+        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[5, 5]))
+
+        # get NodeCollection of the element closest to the center of the layer
+        nest.FindCenterElement(s_nodes)
+    """
+
+    if not isinstance(layer, NodeCollection):
+        raise TypeError("layer must be a NodeCollection")
+    nearest_to_center = FindNearestElement(layer, layer.spatial['center'])[0]
+    index = layer.index(nearest_to_center.get('global_id'))
+    return layer[index:index+1]
+
+
+def GetTargetNodes(sources, tgt_layer, syn_model=None):
+    """
+    Obtain targets of `sources` in given `tgt_layer` population.
+
+    For each neuron in `sources`, this function finds all target elements
+    in `tgt_layer`. If `syn_model` is not given (default), all targets are
+    returned, otherwise only targets connected via the given synapse model.
+
+    Parameters
+    ----------
+    sources : NodeCollection
+        NodeCollection with node IDs of `sources`
+    tgt_layer : NodeCollection
+        NodeCollection with node IDs of `tgt_layer`
+    syn_model : [None | str], optional, default: None
+        Return only target positions for a given synapse model.
+
+    Returns
+    -------
+    tuple of NodeCollection:
+        Tuple of `NodeCollections` of target neurons fulfilling the given criteria, one `NodeCollection` per
+        source node ID in `sources`.
+
+    See also
+    --------
+    GetTargetPositions: Obtain positions of targets in a given target layer connected to given source.
+    GetConnections: Return connection identifiers between
+        sources and targets
+
+    Notes
+    -----
+    * For distributed simulations, this function only returns targets on the
+      local MPI process.
+
+    Example
+    -------
+    ::
+
+        import nest
+
+        # create a spatial population
+        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[11, 11], extent=[11., 11.]))
+
+        # connectivity specifications with a mask
+        conndict = {'rule': 'pairwise_bernoulli', 'p': 1.,
+                    'mask': {'rectangular': {'lower_left': [-2.0, -1.0],
+                                             'upper_right': [2.0, 1.0]}}}
+
+        # connect population s_nodes with itself according to the given
+        # specifications
+        nest.Connect(s_nodes, s_nodes, conndict)
+
+        # get the node IDs of the targets of a source neuron
+        nest.GetTargetNodes(s_nodes[4], s_nodes)
+    """
+    if not isinstance(sources, NodeCollection):
+        raise TypeError("sources must be a NodeCollection.")
+
+    if not isinstance(tgt_layer, NodeCollection):
+        raise TypeError("tgt_layer must be a NodeCollection")
+
+    conns = GetConnections(sources, tgt_layer, synapse_model=syn_model)
+
+    # Re-organize conns into one list per source, containing only target node IDs.
+    src_tgt_map = dict((snode_id, []) for snode_id in sources.tolist())
+    for src, tgt in zip(conns.sources(), conns.targets()):
+        src_tgt_map[src].append(tgt)
+
+    for src in src_tgt_map.keys():
+        src_tgt_map[src] = NodeCollection(list(np.unique(src_tgt_map[src])))
+
+    # convert dict to nested list in same order as sources
+    return tuple(src_tgt_map[snode_id] for snode_id in sources.tolist())
+
+
+def GetSourceNodes(src_layer, targets, syn_model=None):
+    """
+    Obtain sources of `targets` in given `src_layer` population.
+
+    For each neuron in `targets`, this function finds all source elements
+    in `src_layer`. If `syn_model` is not given (default), all sources are
+    returned, otherwise only sources connected via the given synapse model.
+
+    Parameters
+    ----------
+    src_layer : NodeCollection
+        NodeCollection with node IDs of `src_layer`
+    targets : NodeCollection
+        NodeCollection with node IDs of `targets`
+    syn_model : [None | str], optional, default: None
+        Return only source positions for a given synapse model.
+
+    Returns
+    -------
+    tuple of NodeCollection:
+        Tuple of `NodeCollections` of source neurons fulfilling the given criteria, one `NodeCollection` per
+        target node ID in `targets`.
+
+    See also
+    --------
+    GetSourcePositions: Obtain positions of sources in a given source layer connected to given target.
+    GetConnections: Return connection identifiers between sources and targets.
+
+    Notes
+    -----
+    * For distributed simulations, this function only returns sources on the
+      local MPI process.
+
+    Example
+    -------
+    ::
+
+        import nest
+
+        # create a spatial population
+        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[11, 11], extent=[11., 11.]))
+
+        # connectivity specifications with a mask
+        conndict = {'rule': 'pairwise_bernoulli', 'p': 1.,
+                    'mask': {'rectangular': {'lower_left': [-2.0, -1.0],
+                                             'upper_right': [2.0, 1.0]}}}
+
+        # connect population s_nodes with itself according to the given
+        # specifications
+        nest.Connect(s_nodes, s_nodes, conndict)
+
+        # get the node IDs of the sources of a target neuron
+        nest.GetSourceNodes(s_nodes, s_nodes[4])
+    """
+    if not isinstance(src_layer, NodeCollection):
+        raise TypeError("src_layer must be a NodeCollection")
+
+    if not isinstance(targets, NodeCollection):
+        raise TypeError("targets must be a NodeCollection.")
+
+    conns = GetConnections(src_layer, targets, synapse_model=syn_model)
+
+    # Re-organize conns into one list per target, containing only source node IDs.
+    tgt_src_map = dict((tnode_id, []) for tnode_id in targets.tolist())
+    for src, tgt in zip(conns.sources(), conns.targets()):
+        tgt_src_map[tgt].append(src)
+
+    for tgt in tgt_src_map.keys():
+        tgt_src_map[tgt] = NodeCollection(list(np.unique(tgt_src_map[tgt])))
+
+    # convert dict to nested list in same order as targets
+    return tuple(tgt_src_map[tnode_id] for tnode_id in targets.tolist())
+
+
+def GetTargetPositions(sources, tgt_layer, syn_model=None):
+    """
+    Obtain positions of targets to a given `NodeCollection` of `sources`.
+
+    For each neuron in `sources`, this function finds all target elements
+    in `tgt_layer`. If `syn_model` is not given (default), all targets are
+    returned, otherwise only targets connected via the given synapse model.
+
+    Parameters
+    ----------
+    sources : NodeCollection
+        `NodeCollection` with node ID(s) of source neurons
+    tgt_layer : NodeCollection
+        `NodeCollection` of tgt_layer
+    syn_model : [None | str], optional, default: None
+        Return only target positions for a given synapse model.
+
+    Returns
+    -------
+    list of list(s) of tuple(s) of floats:
+        Positions of target neurons fulfilling the given criteria as a nested
+        list, containing one list of positions per node in sources.
+
+    See also
+    --------
+    GetTargetNodes: Obtain targets of a `NodeCollection` of sources in a given target
+        population.
+
+    Notes
+    -----
+    * For distributed simulations, this function only returns targets on the
+      local MPI process.
+
+    Example
+    -------
+    ::
+
+        import nest
+
+        # create a spatial population
+        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[11, 11], extent=[11., 11.]))
+
+        # connectivity specifications with a mask
+        conndict = {'rule': 'pairwise_bernoulli', 'p': 1.,
+                    'mask': {'rectangular': {'lower_left': [-2.0, -1.0],
+                                             'upper_right': [2.0, 1.0]}}}
+
+        # connect population s_nodes with itself according to the given
+        # specifications
+        nest.Connect(s_nodes, s_nodes, conndict)
+
+        # get the positions of the targets of a source neuron
+        nest.GetTargetPositions(s_nodes[5], s_nodes)
+    """
+    if not isinstance(sources, NodeCollection):
+        raise TypeError("sources must be a NodeCollection.")
+
+    # Find positions of all nodes in target layer
+    pos_all_tgts = GetPosition(tgt_layer)
+    first_tgt_node_id = tgt_layer[0].get('global_id')
+
+    connections = GetConnections(sources, tgt_layer,
+                                 synapse_model=syn_model)
+    srcs = connections.get('source')
+    tgts = connections.get('target')
+    if isinstance(srcs, int):
+        srcs = [srcs]
+    if isinstance(tgts, int):
+        tgts = [tgts]
+
+    # Make a dictionary where the keys are the source node IDs, each mapped to a
+    # list with the positions of the targets connected to that source.
+    src_tgt_pos_map = dict((snode_id, []) for snode_id in sources.tolist())
+
+    for i in range(len(connections)):
+        tgt_indx = tgts[i] - first_tgt_node_id
+        src_tgt_pos_map[srcs[i]].append(pos_all_tgts[tgt_indx])
+
+    # Turn dict into list in same order as sources
+    return [src_tgt_pos_map[snode_id] for snode_id in sources.tolist()]
+
+
+def GetSourcePositions(src_layer, targets, syn_model=None):
+    """
+    Obtain positions of sources to a given `NodeCollection` of `targets`.
+
+    For each neuron in `targets`, this function finds all source elements
+    in `src_layer`. If `syn_model` is not given (default), all sources are
+    returned, otherwise only sources connected via the given synapse model.
+
+    Parameters
+    ----------
+    src_layer : NodeCollection
+        `NodeCollection` of src_layer
+    targets : NodeCollection
+        `NodeCollection` with node ID(s) of target neurons
+    syn_model : [None | str], optional, default: None
+        Return only source positions for a given synapse model.
+
+    Returns
+    -------
+    list of list(s) of tuple(s) of floats:
+        Positions of source neurons fulfilling the given criteria as a nested
+        list, containing one list of positions per node in targets.
+
+    See also
+    --------
+    GetSourceNodes: Obtain sources of a `NodeCollection` of targets in a given source
+        population.
+
+    Notes
+    -----
+    * For distributed simulations, this function only returns sources on the
+      local MPI process.
+
+    Example
+    -------
+    ::
+
+        import nest
+
+        # create a spatial population
+        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[11, 11], extent=[11., 11.]))
+
+        # connectivity specifications with a mask
+        conndict = {'rule': 'pairwise_bernoulli', 'p': 1.,
+                    'mask': {'rectangular': {'lower_left': [-2.0, -1.0],
+                                             'upper_right': [2.0, 1.0]}}}
+
+        # connect population s_nodes with itself according to the given
+        # specifications
+        nest.Connect(s_nodes, s_nodes, conndict)
+
+        # get the positions of the sources of a target neuron
+        nest.GetSourcePositions(s_nodes, s_nodes[5])
+    """
+    if not isinstance(targets, NodeCollection):
+        raise TypeError("targets must be a NodeCollection.")
+
+    # Find positions of all nodes in source layer
+    pos_all_srcs = GetPosition(src_layer)
+    first_src_node_id = src_layer[0].get('global_id')
+
+    connections = GetConnections(src_layer, targets,
+                                 synapse_model=syn_model)
+    srcs = connections.get('source')
+    tgts = connections.get('target')
+    if isinstance(srcs, int):
+        srcs = [srcs]
+    if isinstance(tgts, int):
+        tgts = [tgts]
+
+    # Make a dictionary where the keys are the target node IDs, each mapped to a
+    # list with the positions of the sources connected to that target.
+    tgt_src_pos_map = dict((tnode_id, []) for tnode_id in targets.tolist())
+    for i in range(len(connections)):
+        src_indx = srcs[i] - first_src_node_id
+        tgt_src_pos_map[tgts[i]].append(pos_all_srcs[src_indx])
+
+    # Turn dict into list in same order as targets
+    return [tgt_src_pos_map[tnode_id] for tnode_id in targets.tolist()]
+
+
+def SelectNodesByMask(layer, anchor, mask_obj):
+    """
+    Obtain the node IDs inside a masked area of a spatially distributed population.
+
+    The function finds and returns all the node IDs inside a given mask of a
+    `layer`. The node IDs are returned as a `NodeCollection`. The function works on both 2-dimensional and
+    3-dimensional masks and layers. All mask types are allowed, including combined masks.
+
+    Parameters
+    ----------
+    layer : NodeCollection
+        `NodeCollection` with node IDs of the `layer` to select nodes from.
+    anchor : tuple/list of double
+        List containing center position of the layer. This is the point from
+        which we start the search.
+    mask_obj: object
+        `Mask` object specifying the chosen area.
+
+    Returns
+    -------
+    NodeCollection:
+        `NodeCollection` of nodes/elements inside the mask.
+    """
+
+    if not isinstance(layer, NodeCollection):
+        raise TypeError("layer must be a NodeCollection.")
+
+    mask_datum = mask_obj._datum
+
+    node_id_list = sli_func('SelectNodesByMask',
+                            layer, anchor, mask_datum)
+
+    # When creating a NodeCollection, the input list of node IDs must be sorted.
+    return NodeCollection(sorted(node_id_list))
+
+
+def _draw_extent(ax, xctr, yctr, xext, yext):
+    """Draw extent and set aspect ratio and limits"""
+
+    # import pyplot here and not at toplevel to avoid preventing users
+    # from changing matplotlib backend after importing nest
+    import matplotlib.pyplot as plt
+
+    # thin gray line indicating extent
+    llx, lly = xctr - xext / 2.0, yctr - yext / 2.0
+    urx, ury = llx + xext, lly + yext
+    ax.add_patch(
+        plt.Rectangle((llx, lly), xext, yext, fc='none', ec='0.5', lw=1,
+                      zorder=1))
+
+    # set limits slightly outside extent
+    ax.set(aspect='equal',
+           xlim=(llx - 0.05 * xext, urx + 0.05 * xext),
+           ylim=(lly - 0.05 * yext, ury + 0.05 * yext),
+           xticks=tuple(), yticks=tuple())
+
+
+def _shifted_positions(pos, ext):
+    """Get shifted positions corresponding to boundary conditions."""
+    return [[pos[0] + ext[0], pos[1]],
+            [pos[0] - ext[0], pos[1]],
+            [pos[0], pos[1] + ext[1]],
+            [pos[0], pos[1] - ext[1]],
+            [pos[0] + ext[0], pos[1] - ext[1]],
+            [pos[0] - ext[0], pos[1] + ext[1]],
+            [pos[0] + ext[0], pos[1] + ext[1]],
+            [pos[0] - ext[0], pos[1] - ext[1]]]
+
+
+def PlotLayer(layer, fig=None, nodecolor='b', nodesize=20):
+    """
+    Plot all nodes in a `layer`.
+
+    Parameters
+    ----------
+    layer : NodeCollection
+        `NodeCollection` of spatially distributed nodes
+    fig : [None | matplotlib.figure.Figure object], optional, default: None
+        Matplotlib figure to plot to. If not given, a new figure is
+        created.
+    nodecolor : [None | any matplotlib color], optional, default: 'b'
+        Color for nodes
+    nodesize : float, optional, default: 20
+        Marker size for nodes
+
+    Returns
+    -------
+    `matplotlib.figure.Figure` object
+
+    See also
+    --------
+    PlotProbabilityParameter: Create a plot of the connection probability and/or mask.
+    PlotTargets: Plot all targets of a given source.
+    matplotlib.figure.Figure : matplotlib Figure class
+
+    Notes
+    -----
+    * Do **not** use this function in distributed simulations.
+
+
+    Example
+    -------
+    ::
+
+        import nest
+        import matplotlib.pyplot as plt
+
+        # create a spatial population
+        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[11, 11], extent=[11., 11.]))
+
+        # plot layer with all its nodes
+        nest.PlotLayer(s_nodes)
+        plt.show()
+    """
+
+    if not HAVE_MPL:
+        raise ImportError('Matplotlib could not be imported')
+
+    # import pyplot here and not at toplevel to avoid preventing users
+    # from changing matplotlib backend after importing nest
+    import matplotlib.pyplot as plt
+
+    if not isinstance(layer, NodeCollection):
+        raise TypeError('layer must be a NodeCollection.')
+
+    # get layer extent
+    ext = layer.spatial['extent']
+
+    if len(ext) == 2:
+        # 2D layer
+
+        # get layer extent and center, x and y
+        xext, yext = ext
+        xctr, yctr = layer.spatial['center']
+
+        # extract position information, transpose to list of x and y pos
+        if len(layer) == 1:
+            # handle case of single node
+            xpos, ypos = GetPosition(layer)
+        else:
+            xpos, ypos = zip(*GetPosition(layer))
+
+        if fig is None:
+            fig = plt.figure()
+            ax = fig.add_subplot(111)
+        else:
+            ax = fig.gca()
+
+        ax.scatter(xpos, ypos, s=nodesize, facecolor=nodecolor)
+        _draw_extent(ax, xctr, yctr, xext, yext)
+
+    elif len(ext) == 3:
+        # 3D layer
+        from mpl_toolkits.mplot3d import Axes3D  # noqa: F401  (import registers the 3D projection)
+
+        # extract position information, transpose to list of x,y,z pos
+        if len(layer) == 1:
+            # handle case of single node
+            pos = GetPosition(layer)
+        else:
+            pos = zip(*GetPosition(layer))
+
+        if fig is None:
+            fig = plt.figure()
+            ax = fig.add_subplot(111, projection='3d')
+        else:
+            ax = fig.gca()
+
+        ax.scatter(*pos, s=nodesize, c=nodecolor)
+        plt.draw_if_interactive()
+
+    else:
+        raise ValueError("unexpected dimension of layer")
+
+    return fig
+
+
+def PlotTargets(src_nrn, tgt_layer, syn_type=None, fig=None,
+                mask=None, probability_parameter=None,
+                src_color='red', src_size=50, tgt_color='blue', tgt_size=20,
+                mask_color='yellow', probability_cmap='Greens'):
+    """
+    Plot all targets of source neuron `src_nrn` in a target layer `tgt_layer`.
+
+    Parameters
+    ----------
+    src_nrn : NodeCollection
+        `NodeCollection` of source neuron (as single-element NodeCollection)
+    tgt_layer : NodeCollection
+        `NodeCollection` of tgt_layer
+    syn_type : [None | str], optional, default: None
+        Show only targets connected with a given synapse type
+    fig : [None | matplotlib.figure.Figure object], optional, default: None
+        Matplotlib figure to plot to. If not given, a new figure is created.
+    mask : [None | dict], optional, default: None
+        Draw mask with targets; see :py:func:`.PlotProbabilityParameter` for details.
+    probability_parameter : [None | Parameter], optional, default: None
+        Draw connection probability with targets; see :py:func:`.PlotProbabilityParameter` for details.
+    src_color : [None | any matplotlib color], optional, default: 'red'
+        Color used to mark source node position
+    src_size : float, optional, default: 50
+        Size of source marker (see scatter for details)
+    tgt_color : [None | any matplotlib color], optional, default: 'blue'
+        Color used to mark target node positions
+    tgt_size : float, optional, default: 20
+        Size of target markers (see scatter for details)
+    mask_color : [None | any matplotlib color], optional, default: 'yellow'
+        Color used for line marking mask
+    probability_cmap : [None | any matplotlib cmap color], optional, default: 'Greens'
+        Color used for lines marking probability parameter.
+
+    Returns
+    -------
+    matplotlib.figure.Figure object
+
+    See also
+    --------
+    PlotSources: Plot all sources of target neuron in a source layer.
+    GetTargetNodes: Obtain targets of sources in a given target layer.
+    GetTargetPositions: Obtain positions of targets of sources in a given target layer.
+    PlotProbabilityParameter: Add indication of connection probability and mask to axes.
+    PlotLayer: Plot all nodes in a spatially distributed population.
+    matplotlib.pyplot.scatter : matplotlib scatter plot.
+
+    Notes
+    -----
+    * Do **not** use this function in distributed simulations.
+
+    **Example**
+    ::
+
+        import nest
+        import matplotlib.pyplot as plt
+
+        # create a spatial population
+        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[11, 11], extent=[11., 11.]))
+
+        # connectivity specifications with a mask
+        conndict = {'rule': 'pairwise_bernoulli', 'p': 1.,
+                    'mask': {'rectangular': {'lower_left': [-2.0, -1.0],
+                                             'upper_right': [2.0, 1.0]}}}
+
+        # connect population s_nodes with itself according to the given
+        # specifications
+        nest.Connect(s_nodes, s_nodes, conndict)
+
+        # plot the targets of a source neuron
+        nest.PlotTargets(s_nodes[4], s_nodes)
+        plt.show()
+    """
+
+    if not HAVE_MPL:
+        raise ImportError("Matplotlib could not be imported")
+
+    # import pyplot here and not at toplevel to avoid preventing users
+    # from changing matplotlib backend after importing nest
+    import matplotlib.pyplot as plt
+
+    if not isinstance(src_nrn, NodeCollection) or len(src_nrn) != 1:
+        raise TypeError("src_nrn must be a single element NodeCollection.")
+    if not isinstance(tgt_layer, NodeCollection):
+        raise TypeError("tgt_layer must be a NodeCollection.")
+
+    # get position of source
+    srcpos = GetPosition(src_nrn)
+
+    # get layer extent
+    ext = tgt_layer.spatial['extent']
+
+    if len(ext) == 2:
+        # 2D layer
+
+        # get layer extent and center, x and y
+        xext, yext = ext
+        xctr, yctr = tgt_layer.spatial['center']
+
+        if fig is None:
+            fig = plt.figure()
+            ax = fig.add_subplot(111)
+        else:
+            ax = fig.gca()
+
+        # get positions, reorganize to x and y vectors
+        tgtpos = GetTargetPositions(src_nrn, tgt_layer, syn_type)
+        if tgtpos:
+            xpos, ypos = zip(*tgtpos[0])
+            ax.scatter(xpos, ypos, s=tgt_size, facecolor=tgt_color)
+
+        ax.scatter(srcpos[:1], srcpos[1:], s=src_size, facecolor=src_color, alpha=0.4, zorder=-10)
+
+        if mask is not None or probability_parameter is not None:
+            edges = [xctr - xext, xctr + xext, yctr - yext, yctr + yext]
+            PlotProbabilityParameter(src_nrn, probability_parameter, mask=mask, edges=edges, ax=ax,
+                                     prob_cmap=probability_cmap, mask_color=mask_color)
+
+        _draw_extent(ax, xctr, yctr, xext, yext)
+
+    else:
+        # 3D layer
+        from mpl_toolkits.mplot3d import Axes3D  # noqa: F401  (import registers the 3D projection)
+
+        if fig is None:
+            fig = plt.figure()
+            ax = fig.add_subplot(111, projection='3d')
+        else:
+            ax = fig.gca()
+
+        # get positions, reorganize to x,y,z vectors
+        tgtpos = GetTargetPositions(src_nrn, tgt_layer, syn_type)
+        if tgtpos:
+            xpos, ypos, zpos = zip(*tgtpos[0])
+            ax.scatter3D(xpos, ypos, zpos, s=tgt_size, facecolor=tgt_color)
+
+        ax.scatter3D(srcpos[:1], srcpos[1:2], srcpos[2:], s=src_size, facecolor=src_color, alpha=0.4, zorder=-10)
+
+        plt.draw_if_interactive()
+
+    return fig
+
+
+def PlotSources(src_layer, tgt_nrn, syn_type=None, fig=None,
+                mask=None, probability_parameter=None,
+                tgt_color='red', tgt_size=50, src_color='blue', src_size=20,
+                mask_color='yellow', probability_cmap='Greens'):
+    """
+    Plot all sources of target neuron `tgt_nrn` in a source layer `src_layer`.
+
+    Parameters
+    ----------
+    src_layer : NodeCollection
+        `NodeCollection` of src_layer
+    tgt_nrn : NodeCollection
+        `NodeCollection` of target neuron (as single-element NodeCollection)
+    syn_type : [None | str], optional, default: None
+        Show only sources connected with a given synapse type
+    fig : [None | matplotlib.figure.Figure object], optional, default: None
+        Matplotlib figure to plot to. If not given, a new figure is created.
+    mask : [None | dict], optional, default: None
+        Draw mask with sources; see :py:func:`.PlotProbabilityParameter` for details.
+    probability_parameter : [None | Parameter], optional, default: None
+        Draw connection probability with sources; see :py:func:`.PlotProbabilityParameter` for details.
+    tgt_color : [None | any matplotlib color], optional, default: 'red'
+        Color used to mark target node position
+    tgt_size : float, optional, default: 50
+        Size of target marker (see scatter for details)
+    src_color : [None | any matplotlib color], optional, default: 'blue'
+        Color used to mark source node positions
+    src_size : float, optional, default: 20
+        Size of source markers (see scatter for details)
+    mask_color : [None | any matplotlib color], optional, default: 'yellow'
+        Color used for line marking mask
+    probability_cmap : [None | any matplotlib cmap color], optional, default: 'Greens'
+        Color used for lines marking probability parameter.
+
+    Returns
+    -------
+    matplotlib.figure.Figure object
+
+    See also
+    --------
+    PlotTargets: Plot all targets of source neuron in a target layer.
+    GetSourceNodes: Obtain sources of a target in a given source layer.
+    GetSourcePositions: Obtain positions of sources of target in a given source layer.
+    PlotProbabilityParameter: Add indication of connection probability and mask to axes.
+    PlotLayer: Plot all nodes in a spatially distributed population.
+    matplotlib.pyplot.scatter : matplotlib scatter plot.
+
+    Notes
+    -----
+    * Do **not** use this function in distributed simulations.
+
+    **Example**
+        ::
+
+            import nest
+            import matplotlib.pyplot as plt
+
+            # create a spatial population
+            s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[11, 11], extent=[11., 11.]))
+
+            # connectivity specifications with a mask
+            conndict = {'rule': 'pairwise_bernoulli', 'p': 1.,
+                        'use_on_source': True,
+                        'mask': {'rectangular': {'lower_left': [-2.0, -1.0],
+                                                 'upper_right': [2.0, 1.0]}}}
+
+            # connect population s_nodes with itself according to the given
+            # specifications
+            nest.Connect(s_nodes, s_nodes, conndict)
+
+            # plot the sources of a target neuron
+            nest.PlotSources(s_nodes, s_nodes[4])
+            plt.show()
+    """
+
+    # import pyplot here and not at toplevel to avoid preventing users
+    # from changing matplotlib backend after importing nest
+    import matplotlib.pyplot as plt
+
+    if not HAVE_MPL:
+        raise ImportError("Matplotlib could not be imported")
+
+    if not isinstance(tgt_nrn, NodeCollection) or len(tgt_nrn) != 1:
+        raise TypeError("tgt_nrn must be a single element NodeCollection.")
+    if not isinstance(src_layer, NodeCollection):
+        raise TypeError("src_layer must be a NodeCollection.")
+
+    # get position of target neuron
+    tgtpos = GetPosition(tgt_nrn)
+
+    # get layer extent
+    ext = src_layer.spatial['extent']
+
+    if len(ext) == 2:
+        # 2D layer
+
+        # get layer extent and center, x and y
+        xext, yext = ext
+        xctr, yctr = src_layer.spatial['center']
+
+        if fig is None:
+            fig = plt.figure()
+            ax = fig.add_subplot(111)
+        else:
+            ax = fig.gca()
+
+        # get positions, reorganize to x and y vectors
+        srcpos = GetSourcePositions(src_layer, tgt_nrn, syn_type)
+        if srcpos:
+            xpos, ypos = zip(*srcpos[0])
+            ax.scatter(xpos, ypos, s=src_size, facecolor=src_color)
+
+        ax.scatter(tgtpos[:1], tgtpos[1:], s=tgt_size, facecolor=tgt_color, alpha=0.4, zorder=-10)
+
+        if mask is not None or probability_parameter is not None:
+            edges = [xctr - xext, xctr + xext, yctr - yext, yctr + yext]
+            PlotProbabilityParameter(tgt_nrn, probability_parameter, mask=mask, edges=edges, ax=ax,
+                                     prob_cmap=probability_cmap, mask_color=mask_color)
+
+        _draw_extent(ax, xctr, yctr, xext, yext)
+
+    else:
+        # 3D layer
+        from mpl_toolkits.mplot3d import Axes3D
+
+        if fig is None:
+            fig = plt.figure()
+            ax = fig.add_subplot(111, projection='3d')
+        else:
+            ax = fig.gca()
+
+        # get positions, reorganize to x,y,z vectors
+        srcpos = GetSourcePositions(src_layer, tgt_nrn, syn_type)
+        if srcpos:
+            xpos, ypos, zpos = zip(*srcpos[0])
+            ax.scatter3D(xpos, ypos, zpos, s=src_size, facecolor=src_color)
+
+        ax.scatter3D(tgtpos[:1], tgtpos[1:2], tgtpos[2:], s=tgt_size, facecolor=tgt_color, alpha=0.4, zorder=-10)
+
+        plt.draw_if_interactive()
+
+    return fig
+
+
+def _create_mask_patches(mask, periodic, extent, source_pos, face_color='yellow'):
+    """Create Matplotlib Patch objects representing the mask"""
+
+    # import pyplot here and not at toplevel to avoid preventing users
+    # from changing matplotlib backend after importing nest
+    import matplotlib.pyplot as plt
+    import matplotlib as mtpl
+    import matplotlib.path as mpath
+    import matplotlib.patches as mpatches
+
+    edge_color = 'black'
+    alpha = 0.2
+    line_width = 2
+    mask_patches = []
+
+    if 'anchor' in mask:
+        offs = np.array(mask['anchor'])
+    else:
+        offs = np.array([0., 0.])
+
+    if 'circular' in mask:
+        r = mask['circular']['radius']
+
+        patch = plt.Circle(source_pos + offs, radius=r,
+                           fc=face_color, ec=edge_color, alpha=alpha, lw=line_width)
+        mask_patches.append(patch)
+
+        if periodic:
+            for pos in _shifted_positions(source_pos + offs, extent):
+                patch = plt.Circle(pos, radius=r,
+                                   fc=face_color, ec=edge_color, alpha=alpha, lw=line_width)
+                mask_patches.append(patch)
+    elif 'doughnut' in mask:
+        # Mmm... doughnut
+        def make_doughnut_patch(pos, r_out, r_in, ec, fc, alpha):
+            def make_circle(r):
+                t = np.arange(0, np.pi * 2.0, 0.01)
+                t = t.reshape((len(t), 1))
+                x = r * np.cos(t)
+                y = r * np.sin(t)
+                return np.hstack((x, y))
+            outside_verts = make_circle(r_out)[::-1]
+            inside_verts = make_circle(r_in)
+            codes = np.ones(len(inside_verts), dtype=mpath.Path.code_type) * mpath.Path.LINETO
+            codes[0] = mpath.Path.MOVETO
+            vertices = np.concatenate([outside_verts, inside_verts])
+            vertices += pos
+            all_codes = np.concatenate((codes, codes))
+            path = mpath.Path(vertices, all_codes)
+            return mpatches.PathPatch(path, fc=fc, ec=ec, alpha=alpha, lw=line_width)
+
+        r_in = mask['doughnut']['inner_radius']
+        r_out = mask['doughnut']['outer_radius']
+        pos = source_pos + offs
+        patch = make_doughnut_patch(pos, r_out, r_in, edge_color, face_color, alpha)
+        mask_patches.append(patch)
+        if periodic:
+            for pos in _shifted_positions(source_pos + offs, extent):
+                patch = make_doughnut_patch(pos, r_out, r_in, edge_color, face_color, alpha)
+                mask_patches.append(patch)
+    elif 'rectangular' in mask:
+        ll = np.array(mask['rectangular']['lower_left'])
+        ur = np.array(mask['rectangular']['upper_right'])
+        width = ur[0] - ll[0]
+        height = ur[1] - ll[1]
+        pos = source_pos + ll + offs
+        cntr = [pos[0] + width/2, pos[1] + height/2]
+
+        if 'azimuth_angle' in mask['rectangular']:
+            angle = mask['rectangular']['azimuth_angle']
+        else:
+            angle = 0.0
+
+        patch = plt.Rectangle(pos, width, height,
+                              fc=face_color, ec=edge_color, alpha=alpha, lw=line_width)
+        # Need to rotate about center
+        trnsf = mtpl.transforms.Affine2D().rotate_deg_around(cntr[0], cntr[1], angle) + plt.gca().transData
+        patch.set_transform(trnsf)
+        mask_patches.append(patch)
+
+        if periodic:
+            for pos in _shifted_positions(source_pos + ll + offs, extent):
+                patch = plt.Rectangle(pos, width, height,
+                                      fc=face_color, ec=edge_color, alpha=alpha, lw=line_width)
+
+                cntr = [pos[0] + width/2, pos[1] + height/2]
+                # Need to rotate about center
+                trnsf = mtpl.transforms.Affine2D().rotate_deg_around(cntr[0], cntr[1], angle) + plt.gca().transData
+                patch.set_transform(trnsf)
+                mask_patches.append(patch)
+    elif 'elliptical' in mask:
+        width = mask['elliptical']['major_axis']
+        height = mask['elliptical']['minor_axis']
+        if 'azimuth_angle' in mask['elliptical']:
+            angle = mask['elliptical']['azimuth_angle']
+        else:
+            angle = 0.0
+        if 'anchor' in mask['elliptical']:
+            anchor = mask['elliptical']['anchor']
+        else:
+            anchor = np.array([0., 0.])
+        patch = mpatches.Ellipse(source_pos + offs + anchor, width, height,
+                                 angle=angle, fc=face_color,
+                                 ec=edge_color, alpha=alpha, lw=line_width)
+        mask_patches.append(patch)
+
+        if periodic:
+            for pos in _shifted_positions(source_pos + offs + anchor, extent):
+                patch = mpatches.Ellipse(pos, width, height, angle=angle, fc=face_color,
+                                         ec=edge_color, alpha=alpha, lw=line_width)
+                mask_patches.append(patch)
+    else:
+        raise ValueError('Mask type cannot be plotted with this version of PyNEST.')
+    return mask_patches
+
+
+def PlotProbabilityParameter(source, parameter=None, mask=None, edges=[-0.5, 0.5, -0.5, 0.5], shape=[100, 100],
+                             ax=None, prob_cmap='Greens', mask_color='yellow'):
+    """
+    Create a plot of the connection probability and/or mask.
+
+    A probability plot is created based on a `Parameter` and a `source`. The
+    `Parameter` should have a distance dependency. The `source` must be given
+    as a `NodeCollection` with a single node ID. Optionally a `mask` can also be
+    plotted.
+
+    Parameters
+    ----------
+    source : NodeCollection
+        Single node ID `NodeCollection` to use as source.
+    parameter : Parameter
+        `Parameter` the probability is based on.
+    mask : Dictionary
+        Optional specification of a connection mask. Connections will only
+        be made to nodes inside the mask. See :py:func:`.CreateMask` for options on
+        how to specify the mask.
+    edges : list/tuple
+        List of four edges of the region to plot. The values are given as
+        [x_min, x_max, y_min, y_max].
+    shape : list/tuple
+        Number of `Parameter` values to calculate in each direction.
+    ax : matplotlib.axes.AxesSubplot
+        A matplotlib axes instance to plot in. If none is given,
+        a new one is created.
+    prob_cmap : [None | any matplotlib cmap color], optional, default: 'Greens'
+        Colormap used for the probability values.
+    mask_color : [None | any matplotlib color], optional, default: 'yellow'
+        Color used for line marking mask.
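+
+    Example
+    -------
+    A minimal sketch (layer, parameter, and mask values chosen for
+    illustration): plot a distance-dependent connection probability around
+    one node of a grid layer, together with a circular mask.
+    ::
+
+        import nest
+        import matplotlib.pyplot as plt
+
+        s_nodes = nest.Create('iaf_psc_alpha',
+                              positions=nest.spatial.grid(shape=[11, 11]))
+        parameter = 1. - 0.4 * nest.spatial.distance
+        nest.PlotProbabilityParameter(s_nodes[60], parameter,
+                                      mask={'circular': {'radius': 0.25}},
+                                      edges=[-0.5, 0.5, -0.5, 0.5])
+        plt.show()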
+    """
+
+    # import pyplot here and not at toplevel to avoid preventing users
+    # from changing matplotlib backend after importing nest
+    import matplotlib.pyplot as plt
+
+    if not HAVE_MPL:
+        raise ImportError('Matplotlib could not be imported')
+
+    if parameter is None and mask is None:
+        raise ValueError('At least one of parameter or mask must be specified')
+    if ax is None:
+        fig, ax = plt.subplots()
+    ax.set_xlim(*edges[:2])
+    ax.set_ylim(*edges[2:])
+
+    if parameter is not None:
+        z = np.zeros(shape[::-1])
+        for i, x in enumerate(np.linspace(edges[0], edges[1], shape[0])):
+            positions = [[x, y] for y in np.linspace(edges[2], edges[3], shape[1])]
+            values = parameter.apply(source, positions)
+            z[:, i] = np.array(values)
+        img = ax.imshow(np.minimum(np.maximum(z, 0.0), 1.0), extent=edges,
+                        origin='lower', cmap=prob_cmap, vmin=0., vmax=1.)
+        plt.colorbar(img, ax=ax, fraction=0.046, pad=0.04)
+
+    if mask is not None:
+        periodic = source.spatial['edge_wrap']
+        extent = source.spatial['extent']
+        source_pos = GetPosition(source)
+        patches = _create_mask_patches(mask, periodic, extent, source_pos, face_color=mask_color)
+        for patch in patches:
+            patch.set_zorder(0.5)
+            ax.add_patch(patch)
diff --git a/pynest/nest/lib/_hl_api_types.py b/pynest/nest/lib/_hl_api_types.py
new file mode 100644
index 0000000000..89d832f838
--- /dev/null
+++ b/pynest/nest/lib/_hl_api_types.py
@@ -0,0 +1,1221 @@
+# -*- coding: utf-8 -*-
+#
+# _hl_api_types.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Classes defining the different PyNEST types
+"""
+
+from ..ll_api import *
+from .. import pynestkernel as kernel
+from .. import nestkernel_api as nestkernel
+from ._hl_api_helper import (
+    get_parameters,
+    get_parameters_hierarchical_addressing,
+    is_iterable,
+    restructure_data,
+)
+from ._hl_api_simulation import GetKernelStatus
+
+
+def sli_func(*args, **kwargs):
+    raise RuntimeError(f'Called sli_func with\nargs: {args}\nkwargs: {kwargs}')
+
+import numpy
+import json
+from math import floor, log
+
+try:
+    import pandas
+    HAVE_PANDAS = True
+except ImportError:
+    HAVE_PANDAS = False
+
+__all__ = [
+    'CollocatedSynapses',
+    'Compartments',
+    'CreateParameter',
+    'Mask',
+    'NodeCollection',
+    'Parameter',
+    'Receptors',
+    'serializable',
+    'SynapseCollection',
+    'to_json',
+]
+
+
+def CreateParameter(parametertype, specs):
+    """
+    Create a parameter.
+
+    Parameters
+    ----------
+    parametertype : string
+        Parameter type with or without distance dependency.
+        Can be one of the following: 'constant', 'linear', 'exponential', 'gaussian', 'gaussian2D',
+        'uniform', 'normal', 'lognormal', 'distance', 'position'
+    specs : dict
+        Dictionary specifying the parameters of the provided
+        `parametertype`, see **Parameter types**.
+
+
+    Returns
+    -------
+    ``Parameter``:
+        Object representing the parameter
+
+    Notes
+    -----
+    - Instead of using `CreateParameter` you can also use the various parametrizations embedded in NEST. See for
+      instance :py:func:`.uniform`.
+
+    **Parameter types**
+
+    Examples of available parameter types (`parametertype` parameter), with their function and
+    acceptable keys for their corresponding specification dictionaries:
+
+    * Constant
+        ::
+
+            'constant' :
+                {'value' : float} # constant value
+    * Randomization
+        ::
+
+            # random parameter with uniform distribution in [min,max)
+            'uniform' :
+                {'min' : float, # minimum value, default: 0.0
+                 'max' : float} # maximum value, default: 1.0
+
+            # random parameter with normal distribution
+            'normal':
+                {'mean' : float, # mean value, default: 0.0
+                 'std' : float} # standard deviation, default: 1.0
+
+            # random parameter with lognormal distribution
+            'lognormal' :
+                {'mean' : float, # mean value of logarithm, default: 0.0
+                 'std' : float} # standard deviation of log, default: 1.0
+    """
+    return nestkernel.llapi_create_parameter({parametertype: specs})
+
+
+class NodeCollectionIterator:
+    """
+    Iterator class for `NodeCollection`.
+
+    Returns
+    -------
+    `NodeCollection`:
+        Single node ID `NodeCollection` of respective iteration.
+    """
+
+    def __init__(self, nc):
+        self._nc = nc
+        self._increment = 0
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        if self._increment > len(self._nc) - 1:
+            raise StopIteration
+
+        index = self._increment + (self._increment >= 0)
+        val = nestkernel.llapi_slice(self._nc._datum, index, index, 1)
+        self._increment += 1
+        return val
+
+
+class NodeCollection:
+    """
+    Class for `NodeCollection`.
+
+    `NodeCollection` represents the nodes of a network. The class supports
+    iteration, concatenation, indexing, slicing, membership, length, conversion to and
+    from lists, and test for equality. By using the
+    member functions :py:func:`get()` and :py:func:`set()`, you can get and set desired
+    parameters.
+
+    A `NodeCollection` is created by the :py:func:`.Create` function, or by converting a
+    list of nodes to a `NodeCollection` with ``nest.NodeCollection(list)``.
+
+    If your nodes have spatial extent, use the member parameter ``spatial`` to get the spatial information.
+
+    Slicing a NodeCollection follows standard Python slicing syntax: nc[start:stop:step], where start and stop
+    give the zero-indexed right-open range of nodes, and step gives the step length between nodes. The step must
+    be strictly positive.
+
+    Example
+    -------
+        ::
+
+            import nest
+
+            nest.ResetKernel()
+
+            # Create NodeCollection representing nodes
+            nc = nest.Create('iaf_psc_alpha', 10)
+
+            # Convert from list
+            node_ids_in = [2, 4, 6, 8]
+            new_nc = nest.NodeCollection(node_ids_in)
+
+            # Convert to list
+            nc_list = nc.tolist()
+
+            # Concatenation
+            Enrns = nest.Create('aeif_cond_alpha', 600)
+            Inrns = nest.Create('iaf_psc_alpha', 400)
+            nrns = Enrns + Inrns
+
+            # Slicing and membership
+            print(new_nc[2])
+            print(new_nc[1:2])
+            6 in new_nc
+    """
+
+    _datum = None
+
+    def __init__(self, data=None):
+        if data is None:
+            data = []
+        if isinstance(data, nestkernel.NodeCollectionObject):
+            self._datum = data
+        else:
+            # Data from user, must be converted to datum
+            # Data can be anything that can be converted to a NodeCollection,
+            # such as list, tuple, etc.
+            nc = nestkernel.llapi_make_nodecollection(data)
+            self._datum = nc._datum
+
+    def __iter__(self):
+        return NodeCollectionIterator(self)
+
+    def __add__(self, other):
+        if not isinstance(other, NodeCollection):
+            raise NotImplementedError()
+
+        return nestkernel.llapi_join_nc(self._datum, other._datum)
+
+    def __getitem__(self, key):
+        if isinstance(key, slice):
+            if key.start is None:
+                start = 1
+            else:
+                start = key.start + 1 if key.start >= 0 else key.start
+                if abs(start) > self.__len__():
+                    raise IndexError('slice start value outside of the NodeCollection')
+            if key.stop is None:
+                stop = self.__len__()
+            else:
+                stop = key.stop if key.stop > 0 else key.stop - 1
+                if abs(stop) > self.__len__():
+                    raise IndexError('slice stop value outside of the NodeCollection')
+            step = 1 if key.step is None else key.step
+            if step < 1:
+                raise IndexError('slicing step for NodeCollection must be strictly positive')
+
+            return nestkernel.llapi_slice(self._datum, start, stop, step)
+        elif isinstance(key, (int, numpy.integer)):
+            if abs(key + (key >= 0)) > self.__len__():
+                raise IndexError('index value outside of the NodeCollection')
+            return self[key:key + 1:1]
+        elif isinstance(key, (list, tuple)):
+            if len(key) == 0:
+                return NodeCollection([])
+            # Must check if elements are bool first, because bool inherits from int
+            if all(isinstance(x, bool) for x in key):
+                if len(key) != len(self):
+                    raise IndexError('Bool index array must be the same length as NodeCollection')
+                np_key = numpy.array(key, dtype=bool)
+            # Checking that elements are not instances of bool too, because bool inherits from int
+            elif all(isinstance(x, int) and not isinstance(x, bool) for x in key):
+                np_key = numpy.array(key, dtype=numpy.uint64)
+                if len(numpy.unique(np_key)) != len(np_key):
+                    raise ValueError('All node IDs in a NodeCollection have to be unique')
+            else:
+                raise TypeError('Indices must be integers or bools')
+            return nestkernel.llapi_take_array_index(self._datum, np_key)
+        elif isinstance(key, numpy.ndarray):
+            if len(key) == 0:
+                return NodeCollection([])
+            if len(key.shape) != 1:
+                raise TypeError('NumPy indices must be one-dimensional')
+            is_booltype = numpy.issubdtype(key.dtype, numpy.dtype(bool).type)
+            if not (is_booltype or numpy.issubdtype(key.dtype, numpy.integer)):
+                raise TypeError('NumPy indices must be an array of integers or bools')
+            if is_booltype and len(key) != len(self):
+                raise IndexError('Bool index array must be the same length as NodeCollection')
+            if not is_booltype and len(numpy.unique(key)) != len(key):
+                raise ValueError('All node IDs in a NodeCollection have to be unique')
+            return nestkernel.llapi_take_array_index(self._datum, key)
+        else:
+            raise IndexError('only integers, slices, lists, tuples, and numpy arrays are valid indices')
+
+    def __contains__(self, node_id):
+        return nestkernel.llapi_nc_contains(self._datum, node_id)
+
+    def __eq__(self, other):
+        if not isinstance(other, NodeCollection):
+            raise NotImplementedError('Cannot compare NodeCollection to {}'.format(type(other).__name__))
+
+        if self.__len__() != other.__len__():
+            return False
+
+        return nestkernel.llapi_eq_nc(self._datum, other._datum)
+
+    def __ne__(self, other):
+        if not isinstance(other, NodeCollection):
+            raise NotImplementedError()
+
+        return not self == other
+
+    def __len__(self):
+        return nestkernel.llapi_nc_size(self._datum)
+
+    def __str__(self):
+        return nestkernel.llapi_to_string(self._datum)
+
+    def __repr__(self):
+        return self.__str__()
+
+    def get(self, *params, **kwargs):
+        """
+        Get parameters from nodes.
+
+        Parameters
+        ----------
+        params : str or list, optional
+            Parameters to get from the nodes. It must be one of the following:
+
+            - A single string.
+            - A list of strings.
+            - One or more strings, followed by a string or list of strings.
+              This is for hierarchical addressing.
+        output : str, ['pandas','json'], optional
+            If the returned data should be in a Pandas DataFrame or in a
+            JSON serializable format.
+
+        Returns
+        -------
+        int or float:
+            If there is a single node in the `NodeCollection`, and a single
+            parameter in params.
+        array_like:
+            If there are multiple nodes in the `NodeCollection`, and a single
+            parameter in params.
+        dict:
+            If there are multiple parameters in params. Or, if no parameters
+            are specified, a dictionary containing aggregated parameter-values
+            for all nodes is returned.
+        DataFrame:
+            Pandas Data frame if output should be in pandas format.
+
+        Raises
+        ------
+        TypeError
+            If the input params are of the wrong form.
+        KeyError
+            If the specified parameter does not exist for the nodes.
+
+        See Also
+        --------
+        :py:func:`set`
+
+        Examples
+        --------
+
+        >>> nodes.get()
+        {'archiver_length': (0, 0, 0),
+         'beta_Ca': (0.001, 0.001, 0.001),
+         'C_m': (250.0, 250.0, 250.0),
+         ...
+         'V_th': (-55.0, -55.0, -55.0),
+         'vp': (0, 0, 0)}
+
+        >>> nodes.get('V_m')
+        (-70.0, -70.0, -70.0)
+
+        >>> nodes[0].get('V_m')
+        -70.0
+
+        >>> nodes.get('V_m', 'C_m')
+        {'V_m': (-70.0, -70.0, -70.0), 'C_m': (250.0, 250.0, 250.0)}
+
+        >>> voltmeter.get('events', 'senders')
+        array([...], dtype=int64)
+        """
+
+        if not self:
+            raise ValueError('Cannot get parameter of empty NodeCollection')
+
+        # ------------------------- #
+        #      Checks of input      #
+        # ------------------------- #
+        if not kwargs:
+            output = ''
+        elif 'output' in kwargs:
+            output = kwargs['output']
+            if output == 'pandas' and not HAVE_PANDAS:
+                raise ImportError('Pandas could not be imported')
+        else:
+            raise TypeError('Got unexpected keyword argument')
+
+        pandas_output = output == 'pandas'
+
+        if len(params) == 0:
+            # get() is called without arguments
+            result = nestkernel.llapi_get_nc_status(self._datum)
+        elif len(params) == 1:
+            # params is a tuple with a string or list of strings
+            result = get_parameters(self, params[0])
+            if params[0] == 'compartments':
+                result = Compartments(self, result)
+            elif params[0] == 'receptors':
+                result = Receptors(self, result)
+        else:
+            # Hierarchical addressing
+            # TODO-PYNEST-NG: Drop this? Not sure anyone ever used it...
+            result = get_parameters_hierarchical_addressing(self, params)
+
+        if isinstance(result, dict) and len(self) == 1:
+            new_result = {}
+            for k, v in result.items():
+                new_result[k] = v[0] if is_iterable(v) and len(v) == 1 else v
+            result = new_result
+
+        if pandas_output:
+            index = self.get('global_id')
+            if len(params) == 1 and isinstance(params[0], str):
+                # params is a string
+                result = {params[0]: result}
+            elif len(params) > 1 and isinstance(params[1], str):
+                # hierarchical, single string
+                result = {params[1]: result}
+            if len(self) == 1:
+                index = [index]
+                result = {key: [val] for key, val in result.items()}
+            result = pandas.DataFrame(result, index=index)
+        elif output == 'json':
+            result = to_json(result)
+
+        return result
+
+    def set(self, params=None, **kwargs):
+        """
+        Set the parameters of nodes to params.
+
+        If `kwargs` is given, it has to be names and values of an attribute as keyword argument pairs. The values
+        can be single values or list of the same size as the `NodeCollection`.
+
+        Parameters
+        ----------
+        params : str or dict or list
+            Dictionary of parameters (either lists or single values) or list of dictionaries of parameters
+            of same length as the `NodeCollection`.
+        kwargs : keyword argument pairs
+            Named arguments of parameters of the elements in the `NodeCollection`.
+
+        Raises
+        ------
+        TypeError
+            If the input params are of the wrong form.
+        KeyError
+            If the specified parameter does not exist for the nodes.
+
+        See Also
+        --------
+        :py:func:`get`
+        """
+
+        if not self:
+            return
+        if kwargs and params is None:
+            params = kwargs
+        elif kwargs and params:
+            raise TypeError("must either provide params or kwargs, but not both.")
+
+        local_nodes = [self.local] if len(self) == 1 else self.local
+
+        if isinstance(params, dict) and 'compartments' in params:
+            if isinstance(params['compartments'], Compartments):
+                params['compartments'] = params['compartments'].get_tuple()
+            elif params['compartments'] is None:
+                # Adding compartments has been handled by the += operator, so we can remove the entry.
+                params.pop('compartments')
+
+        if isinstance(params, dict) and 'receptors' in params:
+            if isinstance(params['receptors'], Receptors):
+                params['receptors'] = params['receptors'].get_tuple()
+            elif params['receptors'] is None:
+                # Adding receptors has been handled by the += operator, so we can remove the entry.
+                params.pop('receptors')
+
+        if isinstance(params, dict) and all(local_nodes):
+
+            node_params = self[0].get()
+            iterable_node_param = lambda key: key in node_params and not is_iterable(node_params[key])
+            contains_list = [is_iterable(vals) and iterable_node_param(key) for key, vals in params.items()]
+
+            if any(contains_list):
+                temp_param = [{} for _ in range(self.__len__())]
+
+                for key, vals in params.items():
+                    if not is_iterable(vals):
+                        for temp_dict in temp_param:
+                            temp_dict[key] = vals
+                    else:
+                        for i, temp_dict in enumerate(temp_param):
+                            temp_dict[key] = vals[i]
+                params = temp_param
+
+        if isinstance(params, dict):
+            params = [params]
+
+        nestkernel.llapi_set_nc_status(self._datum, params)
+
+    def tolist(self):
+        """
+        Convert `NodeCollection` to list.
+        """
+        if self.__len__() == 0:
+            return []
+
+        return (list(self.get('global_id')) if len(self) > 1
+                else [self.get('global_id')])
+
+    def index(self, node_id):
+        """
+        Find the index of a node ID in the `NodeCollection`.
+
+        Parameters
+        ----------
+        node_id : int
+            Global ID to be found.
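+
+        Returns
+        -------
+        int:
+            Index of the node ID in the `NodeCollection`.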
+
+        Raises
+        ------
+        ValueError
+            If the node ID is not in the `NodeCollection`.
+        """
+        index = nestkernel.llapi_nc_find(self._datum, node_id)
+
+        if index == -1:
+            raise ValueError('{} is not in NodeCollection'.format(node_id))
+
+        return index
+
+    def __bool__(self):
+        """Converts the NodeCollection to a bool. False if it is empty, True otherwise."""
+        return len(self) > 0
+
+    def __array__(self, dtype=None):
+        """Convert the NodeCollection to a NumPy array."""
+        return numpy.array(self.tolist(), dtype=dtype)
+
+    def __getattr__(self, attr):
+        if not self:
+            raise AttributeError('Cannot get attribute of empty NodeCollection')
+
+        if attr == 'spatial':
+            metadata = nestkernel.llapi_get_nc_metadata(self._datum)
+            val = metadata if metadata else None
+            super().__setattr__(attr, val)
+            return self.spatial
+
+        # NumPy compatibility check:
+        # raises AttributeError to tell NumPy that interfaces other than
+        # __array__ are not available (otherwise get_parameters would be
+        # queried, KeyError would be raised, and all would crash)
+        if attr.startswith('__array_'):
+            raise AttributeError
+
+        return self.get(attr)
+
+    def __setattr__(self, attr, value):
+        # `_datum` is the only property of NodeCollection that should not be
+        # interpreted as a property of the model
+        if attr == '_datum':
+            super().__setattr__(attr, value)
+        else:
+            self.set({attr: value})
+
+
+class SynapseCollectionIterator:
+    """
+    Iterator class for SynapseCollection.
+    """
+
+    def __init__(self, synapse_collection):
+        self._iter = iter(synapse_collection._datum)
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        return SynapseCollection(next(self._iter))
+
+
+class SynapseCollection:
+    """
+    Class for Connections.
+
+    `SynapseCollection` represents the connections of a network. The class supports indexing, iteration, length and
+    equality. You can get and set connection parameters by using the member functions :py:func:`get()` and
+    :py:func:`set()`. By using the member function :py:func:`sources()` you get an iterator over
+    source nodes, while :py:func:`targets()` returns an iterator over the target nodes of the connections.
+
+    A SynapseCollection is created by the :py:func:`.GetConnections` function.
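+
+    Example
+    -------
+    A minimal sketch of typical usage (model and values chosen for
+    illustration):
+    ::
+
+        import nest
+
+        nodes = nest.Create('iaf_psc_alpha', 2)
+        nest.Connect(nodes, nodes)
+        conns = nest.GetConnections()
+
+        conns.set(weight=2.0)        # set weight of all connections
+        print(conns.get('weight'))   # [2.0, 2.0, 2.0, 2.0]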
+    """
+
+    _datum = None
+
+    def __init__(self, data):
+
+        if isinstance(data, list):
+            for datum in data:
+                if (not isinstance(datum, nestkernel.ConnectionObject)):
+                    raise TypeError("Expected ConnectionObject.")
+            self._datum = data
+        elif data is None:
+            # We can have an empty SynapseCollection if there are no connections.
+            self._datum = data
+        else:
+            if (not isinstance(data, nestkernel.ConnectionObject)):
+                raise TypeError("Expected ConnectionObject.")
+            # self._datum needs to be a list of ConnectionObjects.
+            self._datum = [data]
+
+        self.print_full = False
+
+    def __iter__(self):
+        return SynapseCollectionIterator(self)
+
+    def __len__(self):
+        if self._datum is None:
+            return 0
+        return len(self._datum)
+
+    def __eq__(self, other):
+        if not isinstance(other, SynapseCollection):
+            raise NotImplementedError()
+
+        if self.__len__() != other.__len__():
+            return False
+        self_get = self.get(['source', 'target', 'target_thread',
+                             'synapse_id', 'port'])
+        other_get = other.get(['source', 'target', 'target_thread',
+                               'synapse_id', 'port'])
+        if self_get != other_get:
+            return False
+        return True
+
+    def __ne__(self, other):
+        if not isinstance(other, SynapseCollection):
+            raise NotImplementedError()
+        return not self == other
+
+    def __getitem__(self, key):
+        if isinstance(key, slice):
+            return SynapseCollection(self._datum[key])
+        else:
+            return SynapseCollection([self._datum[key]])
+
+    def __str__(self):
+        """
+        Printing a `SynapseCollection` returns something of the form:
+
+             source   target   synapse model   weight   delay
+            -------- -------- --------------- -------- -------
+                  1        4   static_synapse    1.000   1.000
+                  2        4   static_synapse    2.000   1.000
+                  1        3     stdp_synapse    4.000   1.000
+                  1        4     stdp_synapse    3.000   1.000
+                  2        3     stdp_synapse    3.000   1.000
+                  2        4     stdp_synapse    2.000   1.000
+
+        If your SynapseCollection has 35 or more elements, only the first and last 15 connections are printed. To
+        display all, first set `print_full = True`.
+
+        ::
+
+            conns = nest.GetConnections()
+            conns.print_full = True
+            print(conns)
+        """
+
+        def format_row_(s, t, sm, w, dly):
+            try:
+                return f'{s:>{src_len-1}d} {t:>{trg_len}d} {sm:>{sm_len}s} {w:>#{w_len}.{4}g} {dly:>#{d_len}.{4}g}'
+            except ValueError:
+                # Used when we have many connections and print_full=False
+                return f'{s:>{src_len-1}} {t:>{trg_len}} {sm:>{sm_len}} {w:>{w_len}} {dly:>{d_len}}'
+
+        MAX_SIZE_FULL_PRINT = 35  # 35 is arbitrarily chosen.
+
+        params = self.get()
+
+        if len(params) == 0:
+            return 'The synapse collection does not contain any connections.'
+
+        srcs = params['source']
+        trgt = params['target']
+        wght = params['weight']
+        dlay = params['delay']
+        s_model = params['synapse_model']
+
+        if isinstance(srcs, int):
+            srcs = [srcs]
+            trgt = [trgt]
+            wght = [wght]
+            dlay = [dlay]
+            s_model = [s_model]
+
+        src_h = 'source'
+        trg_h = 'target'
+        sm_h = 'synapse model'
+        w_h = 'weight'
+        d_h = 'delay'
+
+        # Find maximum number of characters for each column, used to determine width of column
+        src_len = max(len(src_h) + 2, floor(log(max(srcs), 10)))
+        trg_len = max(len(trg_h) + 2, floor(log(max(trgt), 10)))
+        sm_len = max(len(sm_h) + 2, len(max(s_model, key=len)))
+        w_len = len(w_h) + 2
+        d_len = len(d_h) + 2
+
+        if len(srcs) >= MAX_SIZE_FULL_PRINT and not self.print_full:
+            # u'\u22EE ' is the unicode for vertical ellipsis, used when we have many connections
+            srcs = srcs[:15] + [u'\u22EE '] + srcs[-15:]
+            trgt = trgt[:15] + [u'\u22EE '] + trgt[-15:]
+            wght = wght[:15] + [u'\u22EE '] + wght[-15:]
+            dlay = dlay[:15] + [u'\u22EE '] + dlay[-15:]
+            s_model = s_model[:15] + [u'\u22EE '] + s_model[-15:]
+
+        headers = f'{src_h:^{src_len}} {trg_h:^{trg_len}} {sm_h:^{sm_len}} {w_h:^{w_len}} {d_h:^{d_len}}' + '\n'
+        borders = '-'*src_len + ' ' + '-'*trg_len + ' ' + '-'*sm_len + ' ' + '-'*w_len + ' ' + '-'*d_len + '\n'
+        output = '\n'.join(format_row_(s, t, sm, w, d) for s, t, sm, w, d in zip(srcs, trgt, s_model, wght, dlay))
+        result = headers + borders + output
+
+        return result
+
+    def __getattr__(self, attr):
+        if attr == 'distance':
+            dist = nestkernel.llapi_distance(self._datum)
+            super().__setattr__(attr, dist)
+            return self.distance
+
+        return self.get(attr)
+
+    def __setattr__(self, attr, value):
+        # `_datum` is the only property of SynapseCollection that should not be
+        # interpreted as a property of the model
+        if attr == '_datum' or attr == 'print_full':
+            super().__setattr__(attr, value)
+        else:
+            self.set({attr: value})
+
+    def sources(self):
+        """Returns iterator containing the source node IDs of the `SynapseCollection`."""
+        sources = self.get('source')
+        if not isinstance(sources, (list, tuple)):
+            sources = (sources,)
+        return iter(sources)
+
+    def targets(self):
+        """Returns iterator containing the target node IDs of the `SynapseCollection`."""
+        targets = self.get('target')
+        if not isinstance(targets, (list, tuple)):
+            targets = (targets,)
+        return iter(targets)
+
+    def get(self, keys=None, output=''):
+        """
+        Return a parameter dictionary of the connections.
+
+        If `keys` is a string, a list of values is returned, unless we have a
+        single connection, in which case the single value is returned.
+        `keys` may also be a list, in which case a dictionary with a list of
+        values is returned.
+
+        Parameters
+        ----------
+        keys : str or list, optional
+            String or a list of strings naming model properties. get
+            then returns a single value or a dictionary with lists of values
+            belonging to the given `keys`.
+        output : str, ['pandas','json'], optional
+            If the returned data should be in a Pandas DataFrame or in a
+            JSON serializable format.
+
+        Returns
+        -------
+        dict:
+            All parameters, or, if keys is a list of strings, a dictionary with
+            lists of corresponding parameters
+        type:
+            If keys is a string, the corresponding parameter(s) is returned
+
+
+        Raises
+        ------
+        TypeError
+            If input params are of the wrong form.
+        KeyError
+            If the specified parameter does not exist for the connections.
+
+        See Also
+        --------
+        set
+
+        Examples
+        --------
+
+        >>> conns.get()
+        {'delay': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
+         ...
+         'weight': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]}
+
+        >>> conns.get('weight')
+        [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
+
+        >>> conns[0].get('weight')
+        1.0
+
+        >>> conns.get(['source', 'weight'])
+        {'source': [1, 1, 1, 2, 2, 2, 3, 3, 3],
+         'weight': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]}
+        """
+        pandas_output = output == 'pandas'
+        if pandas_output and not HAVE_PANDAS:
+            raise ImportError('Pandas could not be imported')
+
+        # Return empty dictionary if we have no connections or if we have done a nest.ResetKernel()
+        num_conns = GetKernelStatus('num_connections')  # Has to be called first because it involves MPI communication.
+ if self.__len__() == 0 or num_conns == 0: + # Return empty tuple if get is called with an argument + return {} if keys is None else () + + if keys is None: + result = nestkernel.llapi_get_connection_status(self._datum) + elif isinstance(keys, str): + # Extracting the correct values will be done in restructure_data below + result = nestkernel.llapi_get_connection_status(self._datum) + elif is_iterable(keys): + result = [[d[key] for key in keys] for d in nestkernel.llapi_get_connection_status(self._datum)] + else: + raise TypeError("keys should be either a string or an iterable") + + # Need to restructure the data. + final_result = restructure_data(result, keys) + + if pandas_output: + index = (self.get('source') if self.__len__() > 1 else + (self.get('source'),)) + if isinstance(keys, str): + final_result = {keys: final_result} + final_result = pandas.DataFrame(final_result, index=index) + elif output == 'json': + final_result = to_json(final_result) + + return final_result + + def set(self, params=None, **kwargs): + """ + Set the parameters of the connections to `params`. + + If `kwargs` is given, it has to be names and values of an attribute as keyword argument pairs. The values + can be single values or list of the same size as the `SynapseCollection`. + + Parameters + ---------- + params : str or dict or list + Dictionary of parameters (either lists or single values) or list of dictionaries of parameters + of same length as `SynapseCollection`. + kwargs : keyword argument pairs + Named arguments of parameters of the elements in the `SynapseCollection`. + + Raises + ------ + TypeError + If input params are of the wrong form. + KeyError + If the specified parameter does not exist for the connections. + + See Also + -------- + get + """ + + # This was added to ensure that the function is a nop (instead of, + # for instance, raising an exception) when applied to an empty + # SynapseCollection, or after having done a nest.ResetKernel(). + if self.__len__() == 0 or GetKernelStatus('network_size') == 0: + return + + if (isinstance(params, (list, tuple)) and + self.__len__() != len(params)): + raise TypeError("status dict must be a dict, or a list of dicts of length {}".format(self.__len__())) + + if kwargs and params is None: + params = kwargs + elif kwargs and params: + raise TypeError("must either provide params or kwargs, but not both.") + + if isinstance(params, dict): + node_params = self[0].get() + contains_list = [is_iterable(vals) and key in node_params and not is_iterable(node_params[key]) for + key, vals in params.items()] + + if any(contains_list): + temp_param = [{} for _ in range(self.__len__())] + + for key, vals in params.items(): + if not is_iterable(vals): + for temp_dict in temp_param: + temp_dict[key] = vals + else: + for i, temp_dict in enumerate(temp_param): + temp_dict[key] = vals[i] + params = temp_param + + nestkernel.llapi_set_connection_status(self._datum, params) + + def disconnect(self): + """ + Disconnect the connections in the `SynapseCollection`. + """ + sps(self._datum) + sr('Disconnect_a') + + +class CollocatedSynapses: + """ + Class for collocated synapse specifications. + + Wrapper around a list of specifications, used when calling :py:func:`.Connect`. 
+ + Example + ------- + + :: + + nodes = nest.Create('iaf_psc_alpha', 3) + syn_spec = nest.CollocatedSynapses({'weight': 4., 'delay': 1.5}, + {'synapse_model': 'stdp_synapse'}, + {'synapse_model': 'stdp_synapse', 'alpha': 3.}) + nest.Connect(nodes, nodes, conn_spec='one_to_one', syn_spec=syn_spec) + + conns = nest.GetConnections() + + print(conns.alpha) + print(len(syn_spec)) + """ + + def __init__(self, *args): + self.syn_specs = args + + def __len__(self): + return len(self.syn_specs) + + +class Mask: + """ + Class for spatial masks. + + Masks are used when creating connections when nodes have spatial extent. A mask + describes the area of the pool population that shall be searched to find nodes to + connect to for any given node in the driver population. Masks are created using + the :py:func:`.CreateMask` command. + """ + + _datum = None + + # The constructor should not be called by the user + def __init__(self, datum): + """Masks must be created using the CreateMask command.""" + if not isinstance(datum, kernel.SLIDatum) or datum.dtype != "masktype": + raise TypeError("expected mask Datum") + self._datum = datum + + # Generic binary operation + def _binop(self, op, other): + if not isinstance(other, Mask): + raise NotImplementedError() + return sli_func(op, self._datum, other._datum) + + def __or__(self, other): + return self._binop("or", other) + + def __and__(self, other): + return self._binop("and", other) + + def __sub__(self, other): + return self._binop("sub", other) + + def Inside(self, point): + """ + Test if a point is inside a mask. + + Parameters + ---------- + point : tuple/list of float values + Coordinate of point + + Returns + ------- + out : bool + True if the point is inside the mask, False otherwise + """ + return sli_func("Inside", point, self._datum) + + +# TODO-PYNEST-NG: We may consider moving the entire (or most of) Parameter class to the cython level. +class Parameter: + """ + Class for parameters + + A parameter may be used as a probability kernel when creating + connections and nodes or as synaptic parameters (such as weight and delay). + Parameters are created using the :py:func:`.CreateParameter` command. + """ + + _datum = None + + # The constructor should not be called by the user + def __init__(self, datum): + """Parameters must be created using the CreateParameter command.""" + if not isinstance(datum, nestkernel.ParameterObject): + raise TypeError("expected low-level parameter object;" + " use the CreateParameter() function to create a Parameter") + self._datum = datum + + def _arg_as_parameter(self, arg): + if isinstance(arg, Parameter): + return arg + if isinstance(arg, (int, float)): + # Value for the constant parameter must be float. 
+ return CreateParameter('constant', {'value': float(arg)}) + raise NotImplementedError() + + def __add__(self, other): + return nestkernel.llapi_add_parameter(self._datum, self._arg_as_parameter(other)._datum) + + def __radd__(self, other): + return self + other + + def __sub__(self, other): + return nestkernel.llapi_subtract_parameter(self._datum, self._arg_as_parameter(other)._datum) + + def __rsub__(self, other): + return self * (-1) + other + + def __neg__(self): + return self * (-1) + + def __mul__(self, other): + return nestkernel.llapi_multiply_parameter(self._datum, self._arg_as_parameter(other)._datum) + + def __rmul__(self, other): + return self * other + + def __div__(self, other): + return nestkernel.llapi_divide_parameter(self._datum, self._arg_as_parameter(other)._datum) + + def __truediv__(self, other): + return nestkernel.llapi_divide_parameter(self._datum, self._arg_as_parameter(other)._datum) + + def __pow__(self, exponent): + return nestkernel.llapi_pow_parameter(self._datum, self._arg_as_parameter(float(exponent))._datum) + + def __lt__(self, other): + return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {'comparator': 0}) + + def __le__(self, other): + return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {'comparator': 1}) + + def __eq__(self, other): + return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {'comparator': 2}) + + def __ne__(self, other): + return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {'comparator': 3}) + + def __ge__(self, other): + return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {'comparator': 4}) + + def __gt__(self, other): + return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {'comparator': 5}) + + def GetValue(self): + """ + Compute value of parameter. 
+ + Returns + ------- + out : value + The value of the parameter + + See also + -------- + CreateParameter + + Example + ------- + :: + + import nest + + # normal distribution parameter + P = nest.CreateParameter('normal', {'mean': 0.0, 'std': 1.0}) + + # get out value + P.GetValue() + """ + return nestkernel.llapi_get_param_value(self._datum) + + def is_spatial(self): + return nestkernel.llapi_param_is_spatial(self._datum) + + def apply(self, spatial_nc, positions=None): + if positions is None: + return nestkernel.llapi_apply_parameter(self._datum, spatial_nc) + else: + if len(spatial_nc) != 1: + raise ValueError('The NodeCollection must contain a single node ID only') + if not isinstance(positions, (list, tuple)): + raise TypeError('Positions must be a list or tuple of positions') + for pos in positions: + if not isinstance(pos, (list, tuple, numpy.ndarray)): + raise TypeError('Each position must be a list or tuple') + if len(pos) != len(positions[0]): + raise ValueError('All positions must have the same number of dimensions') + return nestkernel.llapi_apply_parameter(self._datum, {'source': spatial_nc, 'targets': positions}) + + +class CmBase: + + def __init__(self, node_collection, elements): + if not isinstance(node_collection, NodeCollection): + raise TypeError(f'node_collection must be a NodeCollection, got {type(node_collection)}') + if not isinstance(elements, tuple): + raise TypeError(f'elements must be a tuple of dicts, got {type(elements)}') + self._elements = elements + self._node_collection = node_collection + + def __add__(self, other): + new_elements = list(self._elements) + if isinstance(other, dict): + new_elements += [other] + elif isinstance(other, (tuple, list)): + if not all(isinstance(d, dict) for d in other): + raise TypeError( + f'{self.__class__.__name__} can only be added with dicts, lists of dicts, ' + f'or other {self.__class__.__name__}') + new_elements += list(other) + elif isinstance(other, self.__class__): + new_elements += list(other._elements) + else: + raise NotImplementedError(f'{self.__class__.__name__} can only be added with dicts, lists of dicts,' + f' or other {self.__class__.__name__}, got {type(other)}') + + return self.__class__(self._node_collection, tuple(new_elements)) + + def __iadd__(self, other): + if isinstance(other, dict): + new_elements = [other] + elif isinstance(other, (tuple, list)): + if not all(isinstance(d, dict) for d in other): + raise TypeError(f'{self.__class__.__name__} can only be added with dicts, lists of dicts, ' + f'or other {self.__class__.__name__}') + new_elements = list(other) + elif isinstance(other, self.__class__): + new_elements = list(other._elements) + else: + raise NotImplementedError(f'{self.__class__.__name__} can only be added with dicts, lists of dicts,' + f' or other {self.__class__.__name__}, got {type(other)}') + self._node_collection.set({f'add_{self.__class__.__name__.lower()}': new_elements}) + return None # Flagging elements as added by returning None + + def __getitem__(self, key): + return self._elements[key] + + def __str__(self): + return str(self._elements) + + def get_tuple(self): + return self._elements + + +class Compartments(CmBase): + # No specialization here because all is done in the base class based on the class name. + pass + + +class Receptors(CmBase): + # No specialization here because all is done in the base class based on the class name. + pass + + +def serializable(data): + """Make data serializable for JSON. 
+
+    Parameters
+    ----------
+    data : any
+
+    Returns
+    -------
+    data_serialized : str, int, float, list, dict
+        Data can be encoded to JSON
+    """
+
+    if isinstance(data, (numpy.ndarray, NodeCollection)):
+        return data.tolist()
+    if isinstance(data, SynapseCollection):
+        # Get full information from SynapseCollection
+        return serializable(data.get())
+    if isinstance(data, (list, tuple)):
+        return [serializable(d) for d in data]
+    if isinstance(data, dict):
+        return dict([(key, serializable(value)) for key, value in data.items()])
+    return data
+
+
+def to_json(data, **kwargs):
+    """Serialize data to JSON.
+
+    Parameters
+    ----------
+    data : any
+    kwargs : keyword argument pairs
+        Named arguments of parameters for `json.dumps` function.
+
+    Returns
+    -------
+    data_json : str
+        JSON format of the data
+    """
+
+    data_serialized = serializable(data)
+    data_json = json.dumps(data_serialized, **kwargs)
+    return data_json
diff --git a/pynest/nest/ll_api.py b/pynest/nest/ll_api.py
index 6418838087..9eed5b04ae 100644
--- a/pynest/nest/ll_api.py
+++ b/pynest/nest/ll_api.py
@@ -174,11 +174,24 @@ def init(argv):
     if not quiet:
         print("NEST initialized successfully!")
 
-    # Dirty hack to get tab-completion for models in IPython.
-    try:
-        __IPYTHON__
-    except NameError:
-        pass
+    # Dirty hack to get tab-completion for models in IPython.
+    try:
+        __IPYTHON__
+    except NameError:
+        pass
+    else:
+        from .lib._hl_api_simulation import GetKernelStatus  # noqa
+        keyword_lists = (
+            "connection_rules",
+            "node_models",
+            "recording_backends",
+            "rng_types",
+            "stimulation_backends",
+            "synapse_models",
+        )
+        for kwl in keyword_lists:
+            keyword.kwlist += GetKernelStatus(kwl)
+
 
     else:
         from .lib.hl_api_simulation import GetKernelStatus  # noqa
diff --git a/pynest/nest/logic/__init__.py b/pynest/nest/logic/__init__.py
index 63f9064edf..e39a4df3fe 100644
--- a/pynest/nest/logic/__init__.py
+++ b/pynest/nest/logic/__init__.py
@@ -19,4 +19,4 @@
 # You should have received a copy of the GNU General Public License
 # along with NEST. If not, see <http://www.gnu.org/licenses/>.
 
-from .hl_api_logic import *  # noqa: F401,F403
+from ._hl_api_logic import *  # noqa: F401,F403
diff --git a/pynest/nest/logic/_hl_api_logic.py b/pynest/nest/logic/_hl_api_logic.py
new file mode 100644
index 0000000000..70d194ad8b
--- /dev/null
+++ b/pynest/nest/logic/_hl_api_logic.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+#
+# _hl_api_logic.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see <http://www.gnu.org/licenses/>.
+
+from ..lib._hl_api_types import CreateParameter
+from .. import nestkernel_api as nestkernel
+
+__all__ = [
+    'conditional',
+]
+
+
+def conditional(condition, param_if_true, param_if_false):
+    """
+    Yields one value or another, based on the condition.
+
+    Parameters
+    ----------
+    condition : Parameter
+        A comparing Parameter, created with the usual comparators.
+    param_if_true : [Parameter | float]
+        Value or Parameter used to get a value used if the condition evaluates to true.
+    param_if_false : [Parameter | float]
+        Value or Parameter used to get a value used if the condition evaluates to false.
+
+    Returns
+    -------
+    Parameter:
+        Object representing the conditional.
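+
+    Example
+    -------
+    A minimal sketch (values chosen for illustration, and assuming the
+    spatial position parameter ``nest.spatial.pos``): use a spatial
+    comparison as condition and plain floats for the two branches.
+    ::
+
+        import nest
+
+        # 1.0 for nodes with x-position left of 0., otherwise 2.0
+        w = nest.logic.conditional(nest.spatial.pos.x < 0., 1.0, 2.0)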
+    """
+    if isinstance(param_if_true, (int, float)):
+        param_if_true = CreateParameter(
+            'constant', {'value': float(param_if_true)})
+    if isinstance(param_if_false, (int, float)):
+        param_if_false = CreateParameter(
+            'constant', {'value': float(param_if_false)})
+    return nestkernel.llapi_conditional_parameter(condition._datum, param_if_true._datum, param_if_false._datum)
diff --git a/pynest/nest/math/__init__.py b/pynest/nest/math/__init__.py
index b5c8b731c7..aa5e4bd220 100644
--- a/pynest/nest/math/__init__.py
+++ b/pynest/nest/math/__init__.py
@@ -19,4 +19,4 @@
 # You should have received a copy of the GNU General Public License
 # along with NEST. If not, see <http://www.gnu.org/licenses/>.
 
-from .hl_api_math import *  # noqa: F401,F403
+from ._hl_api_math import *  # noqa: F401,F403
diff --git a/pynest/nest/math/_hl_api_math.py b/pynest/nest/math/_hl_api_math.py
new file mode 100644
index 0000000000..8397e4aa90
--- /dev/null
+++ b/pynest/nest/math/_hl_api_math.py
@@ -0,0 +1,146 @@
+# -*- coding: utf-8 -*-
+#
+# _hl_api_math.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see <http://www.gnu.org/licenses/>.
+
+from .. import nestkernel_api as nestkernel
+
+
+__all__ = [
+    'exp',
+    'sin',
+    'cos',
+    'min',
+    'max',
+    'redraw',
+]
+
+# TODO: Special cases when argument is a number?
+
+
+def exp(parameter):
+    """
+    Calculate the exponential of the parameter.
+
+    Parameters
+    ----------
+    parameter : Parameter
+        Input Parameter.
+
+    Returns
+    -------
+    Parameter:
+        Object representing the exponential of the parameter.
+    """
+    return nestkernel.llapi_exp_parameter(parameter._datum)
+
+
+def sin(parameter):
+    """
+    Calculate the sine of the parameter.
+
+    Parameters
+    ----------
+    parameter : Parameter
+        Input Parameter.
+
+    Returns
+    -------
+    Parameter:
+        Object representing the sine of the parameter.
+    """
+    return nestkernel.llapi_sin_parameter(parameter._datum)
+
+
+def cos(parameter):
+    """
+    Calculate the cosine of the parameter.
+
+    Parameters
+    ----------
+    parameter : Parameter
+        Input Parameter.
+
+    Returns
+    -------
+    Parameter:
+        Object representing the cosine of the parameter.
+    """
+    return nestkernel.llapi_cos_parameter(parameter._datum)
+
+
+def min(parameter, value):
+    """
+    Yields the smaller of a parameter's value and a given value.
+
+    Parameters
+    ----------
+    parameter : Parameter
+        Input Parameter.
+    value : float
+        Value to compare against.
+
+    Returns
+    -------
+    Parameter:
+        Object yielding the smallest value.
+    """
+    return nestkernel.llapi_min_parameter(parameter._datum, float(value))
+
+
+def max(parameter, value):
+    """
+    Yields the larger of a parameter's value and a given value.
+
+    Parameters
+    ----------
+    parameter : Parameter
+        Input Parameter.
+    value : float
+        Value to compare against.
+
+    Returns
+    -------
+    Parameter:
+        Object yielding the largest value.
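+
+    Example
+    -------
+    A minimal sketch (values chosen for illustration): use ``max`` to impose
+    a lower bound on a distance-dependent parameter.
+    ::
+
+        import nest
+
+        # never let the parameter drop below 0.1
+        p = nest.math.max(1. - 0.5 * nest.spatial.distance, 0.1)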
+    """
+    return nestkernel.llapi_max_parameter(parameter._datum, float(value))
+
+
+def redraw(parameter, min, max):
+    """
+    Redraws the value of the parameter if it is outside of the given limits.
+
+    Both min and max values are included in the limit. If the number of redraws exceeds 1000, an error is thrown.
+
+    Parameters
+    ----------
+    parameter : Parameter
+        Input Parameter.
+    min : float
+        Lower bound of the value.
+    max : float
+        Upper bound of the value.
+
+    Returns
+    -------
+    Parameter:
+        Object redrawing the parameter until it can yield a value within the given limits.
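+
+    Example
+    -------
+    A minimal sketch (values chosen for illustration): truncate a normal
+    distribution to the interval [-2., 2.] by redrawing.
+    ::
+
+        import nest
+
+        p = nest.math.redraw(nest.random.normal(mean=0., std=1.), min=-2., max=2.)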
+    std : float, optional
+        Standard deviation of the distribution. Default value is 1.0.
+
+    Returns
+    -------
+    Parameter:
+        Object yielding values drawn from the distribution.
+    """
+    return CreateParameter('normal', {'mean': mean, 'std': std})
+
+
+def exponential(beta=1.0):
+    """
+    Draws samples from an exponential distribution.
+
+    Parameters
+    ----------
+    beta : float, optional
+        Scale parameter of the distribution. Default value is 1.0.
+
+    Returns
+    -------
+    Parameter:
+        Object yielding values drawn from the distribution.
+    """
+    return CreateParameter('exponential', {'beta': beta})
+
+
+def lognormal(mean=0.0, std=1.0):
+    """
+    Draws samples from a log-normal distribution.
+
+    Parameters
+    ----------
+    mean : float, optional
+        Mean value of the underlying normal distribution. Default value is 0.
+    std : float, optional
+        Standard deviation of the underlying normal distribution. Default value is 1.0.
+
+    Returns
+    -------
+    Parameter:
+        Object yielding values drawn from the distribution.
+    """
+    return CreateParameter('lognormal', {'mean': mean, 'std': std})
diff --git a/pynest/nest/server/__init__.py b/pynest/nest/server/__init__.py
index 0ea1b54eda..597a47da7a 100644
--- a/pynest/nest/server/__init__.py
+++ b/pynest/nest/server/__init__.py
@@ -19,4 +19,4 @@
 # You should have received a copy of the GNU General Public License
 # along with NEST.  If not, see <http://www.gnu.org/licenses/>.
 
-from .hl_api_server import *  # noqa: F401,F403
+from ._hl_api_server import *  # noqa: F401,F403
diff --git a/pynest/nest/server/_hl_api_server.py b/pynest/nest/server/_hl_api_server.py
new file mode 100644
index 0000000000..cbb8e11d4f
--- /dev/null
+++ b/pynest/nest/server/_hl_api_server.py
@@ -0,0 +1,498 @@
+# -*- coding: utf-8 -*-
+#
+# _hl_api_server.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST.  If not, see <http://www.gnu.org/licenses/>.
+
+import importlib
+import inspect
+import io
+import os
+import sys
+import time
+import traceback
+from copy import deepcopy
+
+import flask
+import RestrictedPython
+from flask import Flask, request, jsonify
+from flask_cors import CORS, cross_origin
+from werkzeug.exceptions import abort
+from werkzeug.wrappers import Response
+
+import nest
+
+MODULES = os.environ.get('NEST_SERVER_MODULES', 'nest').split(',')
+RESTRICTION_OFF = bool(os.environ.get('NEST_SERVER_RESTRICTION_OFF', False))
+EXCEPTION_ERROR_STATUS = 400
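+
+# Example (sketch): both settings above are read from the environment, so a
+# trusted deployment might be configured like this before launching the
+# server (the `nest-server` CLI entry point is assumed here):
+#
+#   export NEST_SERVER_MODULES='nest,numpy'
+#   export NEST_SERVER_RESTRICTION_OFF=true
+#   nest-server start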
+
+if RESTRICTION_OFF:
+    msg = 'NEST Server runs without a RestrictedPython trusted environment.'
+    print(f'***\n*** WARNING: {msg}\n***')
+
+
+__all__ = [
+    'app',
+    'do_exec',
+    'set_mpi_comm',
+    'run_mpi_app',
+    'nestify',
+]
+
+app = Flask(__name__)
+CORS(app)
+
+mpi_comm = None
+
+
+@app.route('/', methods=['GET'])
+def index():
+    return jsonify({
+        'nest': nest.__version__,
+        'mpi': mpi_comm is not None,
+    })
+
+
+def do_exec(args, kwargs):
+    try:
+        source_code = kwargs.get('source', '')
+        source_cleaned = clean_code(source_code)
+
+        locals_ = dict()
+        response = dict()
+        if RESTRICTION_OFF:
+            with Capturing() as stdout:
+                exec(source_cleaned, get_globals(), locals_)
+            if len(stdout) > 0:
+                response['stdout'] = '\n'.join(stdout)
+        else:
+            code = RestrictedPython.compile_restricted(source_cleaned, '<inline>', 'exec')  # noqa
+            exec(code, get_restricted_globals(), locals_)
+            if '_print' in locals_:
+                response['stdout'] = ''.join(locals_['_print'].txt)
+
+        if 'return' in kwargs:
+            if isinstance(kwargs['return'], list):
+                data = dict()
+                for variable in kwargs['return']:
+                    data[variable] = locals_.get(variable, None)
+            else:
+                data = locals_.get(kwargs['return'], None)
+            response['data'] = nest.serializable(data)
+        return response
+
+    except Exception as e:
+        for line in traceback.format_exception(*sys.exc_info()):
+            print(line, flush=True)
+        abort(Response(str(e), EXCEPTION_ERROR_STATUS))
+
+
+def log(call_name, msg):
+    msg = f'==> MASTER 0/{time.time():.7f} ({call_name}): {msg}'
+    print(msg, flush=True)
+
+
+def do_call(call_name, args=[], kwargs={}):
+    """Call a PyNEST function or execute a script within the server.
+
+    If the server is run serially (i.e., without MPI), this function
+    will do one of two things: If call_name is "exec", it will execute
+    the script given in args via do_exec(). If call_name is the name
+    of a PyNEST API function, it will call that function and pass args
+    and kwargs to it.
+
+    If the server is run with MPI, this function will first communicate
+    the call type ("exec" or API call) and the args and kwargs to all
+    worker processes. Only then will it execute the call in the same
+    way as described above for the serial case. After the call, all
+    worker responses are collected, combined and returned.
+
+    Please note that this function must only be called on the master
+    process (i.e., the task with rank 0) in a distributed scenario.
+
+    """
+
+    if mpi_comm is not None:
+        assert mpi_comm.Get_rank() == 0
+
+    if mpi_comm is not None:
+        log(call_name, 'sending call bcast')
+        mpi_comm.bcast(call_name, root=0)
+        data = (args, kwargs)
+        log(call_name, f'sending data bcast, data={data}')
+        mpi_comm.bcast(data, root=0)
+
+    if call_name == "exec":
+        master_response = do_exec(args, kwargs)
+    else:
+        call, args, kwargs = nestify(call_name, args, kwargs)
+        log(call_name, f'local call, args={args}, kwargs={kwargs}')
+        master_response = call(*args, **kwargs)
+
+    response = [nest.serializable(master_response)]
+    if mpi_comm is not None:
+        log(call_name, 'waiting for response gather')
+        response = mpi_comm.gather(response[0], root=0)
+        log(call_name, f'received response gather, data={response}')
+
+    return combine(call_name, response)
+
+
+@app.route('/exec', methods=['GET', 'POST'])
+@cross_origin()
+def route_exec():
+    """ Route to execute script in Python.
+    """
+
+    args, kwargs = get_arguments(request)
+    response = do_call('exec', args, kwargs)
+    return jsonify(response)
+
+
+# --------------------------
+# RESTful API
+# --------------------------
+
+nest_calls = dir(nest)
+nest_calls = list(filter(lambda x: not x.startswith('_'), nest_calls))
+nest_calls.sort()
+
+
+@app.route('/api', methods=['GET'])
+@cross_origin()
+def route_api():
+    """ Route to list callable functions in NEST.
+    """
+    return jsonify(nest_calls)
+
+
+@app.route('/api/<call>', methods=['GET', 'POST'])
+@cross_origin()
+def route_api_call(call):
+    """ Route to call a function in NEST.
+    """
+    print(f"\n{'='*40}\n", flush=True)
+    args, kwargs = get_arguments(request)
+    log("route_api_call", f"call={call}, args={args}, kwargs={kwargs}")
+    response = api_client(call, args, kwargs)
+    return jsonify(response)
+
+
+# ----------------------
+# Helpers for the server
+# ----------------------
+
+class Capturing(list):
+    """ Capture everything written to stdout, e.g. by print.
+    """
+    def __enter__(self):
+        self._stdout = sys.stdout
+        sys.stdout = self._stringio = io.StringIO()
+        return self
+
+    def __exit__(self, *args):
+        self.extend(self._stringio.getvalue().splitlines())
+        del self._stringio    # free up some memory
+        sys.stdout = self._stdout
+
+
+def clean_code(source):
+    codes = source.split('\n')
+    code_cleaned = filter(lambda code: not (code.startswith('import') or code.startswith('from')), codes)  # noqa
+    return '\n'.join(code_cleaned)
+
+
+def get_arguments(request):
+    """ Get arguments from the request.
+    """
+    args, kwargs = [], {}
+    if request.is_json:
+        json = request.get_json()
+        if isinstance(json, str) and len(json) > 0:
+            args = [json]
+        elif isinstance(json, list):
+            args = json
+        elif isinstance(json, dict):
+            kwargs = json
+            if 'args' in kwargs:
+                args = kwargs.pop('args')
+    elif len(request.form) > 0:
+        if 'args' in request.form:
+            args = request.form.getlist('args')
+        else:
+            kwargs = request.form.to_dict()
+    elif len(request.args) > 0:
+        if 'args' in request.args:
+            args = request.args.getlist('args')
+        else:
+            kwargs = request.args.to_dict()
+    return list(args), kwargs
+
+
+def get_globals():
+    """ Get globals for exec function.
+    """
+    copied_globals = globals().copy()
+
+    # Add modules to copied globals
+    modlist = [(module, importlib.import_module(module)) for module in MODULES]
+    modules = dict(modlist)
+    copied_globals.update(modules)
+
+    return copied_globals
+
+
+def get_or_error(func):
+    """ Wrapper to get data and status.
+    """
+    def func_wrapper(call, args, kwargs):
+        try:
+            return func(call, args, kwargs)
+        except Exception as e:
+            for line in traceback.format_exception(*sys.exc_info()):
+                print(line, flush=True)
+            abort(Response(str(e), EXCEPTION_ERROR_STATUS))
+    return func_wrapper
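+
+# Example (sketch): with the server running, the RESTful API above can be
+# exercised from any HTTP client. Assuming the default address
+# 127.0.0.1:52425 and the third-party `requests` package, a call to
+# nest.Create could look like:
+#
+#   import requests
+#   nodes = requests.post('http://127.0.0.1:52425/api/Create',
+#                         json={'model': 'iaf_psc_alpha', 'n': 2}).json()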
+
+
+def get_restricted_globals():
+    """ Get restricted globals for exec function.
+    """
+    def getitem(obj, index):
+        typelist = (list, tuple, dict, nest.NodeCollection)
+        if obj is not None and type(obj) in typelist:
+            return obj[index]
+        msg = f"Error getting restricted globals: unidentified object '{obj}'."
+        raise TypeError(msg)
+
+    restricted_builtins = RestrictedPython.safe_builtins.copy()
+    restricted_builtins.update(RestrictedPython.limited_builtins)
+    restricted_builtins.update(RestrictedPython.utility_builtins)
+    restricted_builtins.update(dict(
+        max=max,
+        min=min,
+        sum=sum,
+        time=time,
+    ))
+
+    restricted_globals = dict(
+        __builtins__=restricted_builtins,
+        _print_=RestrictedPython.PrintCollector,
+        _getattr_=RestrictedPython.Guards.safer_getattr,
+        _getitem_=getitem,
+        _getiter_=iter,
+        _unpack_sequence_=RestrictedPython.Guards.guarded_unpack_sequence,
+        _write_=RestrictedPython.Guards.full_write_guard,
+    )
+
+    # Add modules to restricted globals
+    modlist = [(module, importlib.import_module(module)) for module in MODULES]
+    modules = dict(modlist)
+    restricted_globals.update(modules)
+
+    return restricted_globals
+
+
+def nestify(call_name, args, kwargs):
+    """Get the NEST API call and convert arguments if necessary.
+    """
+
+    call = getattr(nest, call_name)
+    objectnames = ['nodes', 'source', 'target', 'pre', 'post']
+    paramKeys = list(inspect.signature(call).parameters.keys())
+    args = [nest.NodeCollection(arg) if paramKeys[idx] in objectnames
+            else arg for (idx, arg) in enumerate(args)]
+    for (key, value) in kwargs.items():
+        if key in objectnames:
+            kwargs[key] = nest.NodeCollection(value)
+
+    return call, args, kwargs
+
+
+@get_or_error
+def api_client(call_name, args, kwargs):
+    """ API client to call a function in NEST.
+    """
+
+    call = getattr(nest, call_name)
+
+    if callable(call):
+        if 'inspect' in kwargs:
+            response = {
+                'data': getattr(inspect, kwargs['inspect'])(call)
+            }
+        else:
+            response = do_call(call_name, args, kwargs)
+    else:
+        response = call
+
+    return response
+
+
+def set_mpi_comm(comm):
+    global mpi_comm
+    mpi_comm = comm
+
+
+def run_mpi_app(host="127.0.0.1", port=52425):
+    # NEST crashes with a segmentation fault if the number of threads
+    # is changed from the outside. Calling run() with threaded=False
+    # prevents Flask from performing such changes.
+    app.run(host=host, port=port, threaded=False)
+
+
+def combine(call_name, response):
+    """Combine responses from different MPI processes.
+
+    In a distributed scenario, each MPI process creates its own share
+    of the response from the data available locally. To present a
+    coherent view on the response data for the caller, this data has to
+    be combined.
+
+    If this function is run serially (i.e., without MPI), it just
+    returns the response data from the only process immediately.
+
+    The type of the returned result can vary depending on the call
+    that produced it.
+
+    The combination of results is based on a cascade of heuristics
+    based on the call that was issued and individual response data:
+    * if all responses are None, the combined response will also just
+      be None
+    * for some specific calls, the responses are known to be the
+      same from the master and all workers. In this case, the
+      combined response is just the master response
+    * if the response list contains only a single actual response and
+      None otherwise, the combined response will be that one actual
+      response
+    * for calls to GetStatus on recording devices, the combined
+      response will be a merged dictionary in the sense that all
+      fields that contain a single value in the individual responses
+      are kept as single values, while lists will be appended in
+      order of appearance; dictionaries in the response are
+      recursively treated in the same way
+    * for calls to GetStatus on neurons, the combined response is just
+      the single dictionary returned by the process on which the
+      neuron is actually allocated
+    * if the response contains one list per process, the combined
+      response will be those lists concatenated and flattened.
+
+    """
+
+    if mpi_comm is None:
+        return response[0]
+
+    if all(v is None for v in response):
+        return None
+
+    # return the master response if all responses are known to be the same
+    if call_name in ('exec', 'Create', 'GetDefaults', 'GetKernelStatus',
+                     'SetKernelStatus', 'SetStatus'):
+        return response[0]
+
+    # return a single response if there is only one which is not None
+    filtered_response = list(filter(lambda x: x is not None, response))
+    if len(filtered_response) == 1:
+        return filtered_response[0]
+
+    # return a single merged dictionary if there are many of them
+    if all(type(v[0]) is dict for v in response):
+        return merge_dicts(response)
+
+    # return a flattened list if the response only consists of lists
+    if all(type(v) is list for v in response):
+        return [item for lst in response for item in lst]
+
+    log("combine()", f"ERROR: cannot combine response={response}")
+    msg = "Cannot combine data because of unknown reason"
+    raise Exception(msg)
+
+
+def merge_dicts(response):
+    """Merge status dictionaries of recorders
+
+    This function runs through a zipped list and performs the
+    following steps:
+    * sum up all n_events fields
+    * if recording to memory: merge the event dictionaries by joining
+      all contained arrays
+    * if recording to ascii: join filenames arrays
+    * take all other values directly from the device on the first
+      process
+
+    """
+
+    result = []
+
+    for device_dicts in zip(*response):
+
+        # TODO: either strip fields like thread, vp, thread_local_id,
+        # and local or make them lists that contain the values from
+        # all dicts.
+
+        element_type = device_dicts[0]['element_type']
+
+        if element_type not in ('neuron', 'recorder', 'stimulator'):
+            msg = f'Cannot combine data of element with type "{element_type}".'
+            raise Exception(msg)
+
+        if element_type == 'neuron':
+            tmp = list(filter(lambda status: status['local'], device_dicts))
+            assert len(tmp) == 1
+            result.append(tmp[0])
+
+        if element_type == 'recorder':
+            tmp = deepcopy(device_dicts[0])
+            tmp['n_events'] = 0
+
+            for device_dict in device_dicts:
+                tmp['n_events'] += device_dict['n_events']
+
+            record_to = tmp['record_to']
+            if record_to not in ('ascii', 'memory'):
+                msg = f'Cannot combine data when recording to "{record_to}".'
+ raise Exception(msg) + + if record_to == 'memory': + event_keys = tmp['events'].keys() + for key in event_keys: + tmp['events'][key] = [] + for device_dict in device_dicts: + for key in event_keys: + tmp['events'][key].extend(device_dict['events'][key]) + + if record_to == 'ascii': + tmp['filenames'] = [] + for device_dict in device_dicts: + tmp['filenames'].extend(device_dict['filenames']) + + result.append(tmp) + + if element_type == 'stimulator': + result.append(device_dicts[0]) + + return result + + +if __name__ == "__main__": + app.run() diff --git a/pynest/nest/spatial/__init__.py b/pynest/nest/spatial/__init__.py index 94e1fa6e5f..c2227b7373 100644 --- a/pynest/nest/spatial/__init__.py +++ b/pynest/nest/spatial/__init__.py @@ -21,8 +21,8 @@ import functools as _functools -from .hl_api_spatial import DistanceParameter as _DistanceParameter -from .hl_api_spatial import * # noqa: F401,F403 +from ._hl_api_spatial import DistanceParameter as _DistanceParameter +from ._hl_api_spatial import * # noqa: F401,F403 @_functools.lru_cache(maxsize=None) diff --git a/pynest/nest/spatial/_hl_api_spatial.py b/pynest/nest/spatial/_hl_api_spatial.py new file mode 100644 index 0000000000..395bfd49c9 --- /dev/null +++ b/pynest/nest/spatial/_hl_api_spatial.py @@ -0,0 +1,264 @@ +# -*- coding: utf-8 -*- +# +# _hl_api_spatial.py +# +# This file is part of NEST. +# +# Copyright (C) 2004 The NEST Initiative +# +# NEST is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 2 of the License, or +# (at your option) any later version. +# +# NEST is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with NEST. If not, see . + +import numpy as np +from ..lib._hl_api_types import CreateParameter, Parameter +from .. import nestkernel_api as nestkernel + + +__all__ = [ + 'distance', + 'grid', + 'free', + 'pos', + 'source_pos', + 'target_pos', +] + + +class DistanceParameter(Parameter): + """ + Object representing the distance between two nodes in space. + + If used alone, the DistanceObject represents simply the Euclidean + distance between two nodes. + + Alternatively the distance in a single dimension may be chosen. Three + properties are defined, x, y, and z, which represent the distance in + their respective dimensions. Note that the distance parameter can only + be used in contexts with two nodes, e.g. when connecting. + """ + + def __init__(self): + distance_parameter = CreateParameter('distance', {}) + super().__init__(distance_parameter._datum) + + @property + def x(self): + """Parameter representing the distance on the x-axis""" + return CreateParameter('distance', {'dimension': 1}) + + @property + def y(self): + """Parameter representing the distance on the y-axis""" + return CreateParameter('distance', {'dimension': 2}) + + @property + def z(self): + """Parameter representing the distance on the z-axis""" + return CreateParameter('distance', {'dimension': 3}) + + @staticmethod + def n(dimension): + """ + Distance in given dimension. + + Parameters + ---------- + dimension : int + Dimension in which to get the distance. + + Returns + ------- + Parameter: + Object yielding the distance in the given dimension. 
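+
+        Example
+        -------
+        A sketch: the distance along the third (z) dimension, usable for
+        example as an ingredient of a connection probability::
+
+            nest.spatial.distance.n(3)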
+ """ + return CreateParameter('distance', {'dimension': dimension}) + + +distance = DistanceParameter() + + +class pos: + """ + Position of node in a specific dimension. + + Three properties are defined, x, y, and z, which represent the + position in their respective dimensions. Note that this parameter can + only be used in contexts with one node, e.g. when setting node status. + """ + x = CreateParameter('position', {'dimension': 0}) + y = CreateParameter('position', {'dimension': 1}) + z = CreateParameter('position', {'dimension': 2}) + + @staticmethod + def n(dimension): + """ + Position in given dimension. + + Parameters + ---------- + dimension : int + Dimension in which to get the position. + + Returns + ------- + Parameter: + Object yielding the position in the given dimension. + """ + return CreateParameter('position', {'dimension': dimension}) + + +class source_pos: + """ + Position of the source node in a specific dimension. + + Three properties are defined, x, y, and z, which represent the source + node position in their respective dimensions. Note that this parameter + can only be used in contexts with two nodes, e.g. when connecting. + """ + x = CreateParameter('position', {'dimension': 0, 'synaptic_endpoint': 1}) + y = CreateParameter('position', {'dimension': 1, 'synaptic_endpoint': 1}) + z = CreateParameter('position', {'dimension': 2, 'synaptic_endpoint': 1}) + + @staticmethod + def n(dimension): + """ + Position of source node in given dimension. + + Parameters + ---------- + dimension : int + Dimension in which to get the position. + + Returns + ------- + Parameter: + Object yielding the position in the given dimension. + """ + return CreateParameter('position', + {'dimension': dimension, 'synaptic_endpoint': 1}) + + +class target_pos: + """ + Position of the target node in a specific dimension. + + Three properties are defined, x, y, and z, which represent the target + node position in their respective dimensions. Note that this parameter + can only be used in contexts with two nodes, e.g. when connecting. + """ + x = CreateParameter('position', {'dimension': 0, 'synaptic_endpoint': 2}) + y = CreateParameter('position', {'dimension': 1, 'synaptic_endpoint': 2}) + z = CreateParameter('position', {'dimension': 2, 'synaptic_endpoint': 2}) + + @staticmethod + def n(dimension): + """ + Position of target node in given dimension. + + Parameters + ---------- + dimension : int + Dimension in which to get the position. + + Returns + ------- + Parameter: + Object yielding the position in the given dimension. + """ + return CreateParameter('position', + {'dimension': dimension, 'synaptic_endpoint': 2}) + + +class grid: + """ + Defines grid-based positions for nodes. + + Parameters + ---------- + shape : list + Two- or three-element list with the grid shape in two or three dimensions, respectively. + center : list, optional + Position of the center of the layer. + extent : list, optional + Extent of the layer in each dimension. + edge_wrap : bool, optional + Specifies periodic boundary conditions. + """ + + def __init__(self, shape, center=None, extent=None, edge_wrap=False): + self.shape = shape + self.center = center + self.extent = extent + self.edge_wrap = edge_wrap + + +class free: + """ + Defines positions for nodes based on a list of positions, or a Parameter object. 
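+
+    For example (illustrative only), ``free([[0.1, 0.2], [0.4, 0.5]])`` places
+    two nodes at explicit 2D positions, while ``free(nest.random.uniform(),
+    num_dimensions=2)`` draws both coordinates of every node from a uniform
+    distribution.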
+ + Parameters + ---------- + pos : [list | Parameter] + Either a list of two- or three-element lists containing positions, depending on number of dimensions, + a two- or three-element list of Parameters, depending on number of dimensions, + or a single Parameter. + extent : list, optional + Extent of the layer in each dimension. + edge_wrap : bool, optional + Specifies periodic boundary conditions. + num_dimensions : int, optional + If a single Parameter is given as position, and no extent is + specified, the number of dimensions must be set explicitly. + """ + + def __init__(self, pos, extent=None, edge_wrap=False, num_dimensions=None): + if extent and num_dimensions: + raise TypeError( + 'extent and number of dimensions cannot be specified at the' + ' same time') + if isinstance(pos, (list, tuple, np.ndarray)): + if num_dimensions: + raise TypeError( + 'number of dimensions cannot be specified when using an' + ' array of positions') + if len(pos) == sum(isinstance(d, Parameter) for d in pos): + self.pos = self._parameter_list_to_dimension(pos, len(pos)) + else: + self.pos = pos + elif isinstance(pos, Parameter): + if extent: + num_dimensions = len(extent) + # Number of dimensions is unknown if it cannot be inferred from + # extent, or if it's not explicitly specified. + if not num_dimensions: + raise TypeError( + 'could not infer number of dimensions. Set ' + 'num_dimensions or extent when using Parameter as pos') + dim_parameters = [pos for _ in range(num_dimensions)] + self.pos = self._parameter_list_to_dimension(dim_parameters, num_dimensions) + else: + raise TypeError( + 'pos must be either an array of positions, or a Parameter') + + self.extent = extent + self.edge_wrap = edge_wrap + + def _parameter_list_to_dimension(self, dim_parameters, num_dimensions): + """Converts a list of Parameters to a dimension2d or dimension3d Parameter.""" + assert(len(dim_parameters) == num_dimensions) + if num_dimensions < 2 or num_dimensions > 3: + raise ValueError('Number of dimensions must be 2 or 3') + # The dimension2d and dimension3d Parameter stores a Parameter for + # each dimension. When creating positions for nodes, values from + # each parameter are fetched for the position vector. + return nestkernel.llapi_dimension_parameter([p._datum for p in dim_parameters]) diff --git a/pynest/nest/spatial_distributions/__init__.py b/pynest/nest/spatial_distributions/__init__.py index d667d86ff2..decb5a3c46 100644 --- a/pynest/nest/spatial_distributions/__init__.py +++ b/pynest/nest/spatial_distributions/__init__.py @@ -19,4 +19,4 @@ # You should have received a copy of the GNU General Public License # along with NEST. If not, see . -from .hl_api_spatial_distributions import * # noqa: F401,F403 +from ._hl_api_spatial_distributions import * # noqa: F401,F403 diff --git a/pynest/nest/spatial_distributions/_hl_api_spatial_distributions.py b/pynest/nest/spatial_distributions/_hl_api_spatial_distributions.py new file mode 100644 index 0000000000..96dd7835ef --- /dev/null +++ b/pynest/nest/spatial_distributions/_hl_api_spatial_distributions.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- +# +# _hl_api_spatial_distributions.py +# +# This file is part of NEST. +# +# Copyright (C) 2004 The NEST Initiative +# +# NEST is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 2 of the License, or +# (at your option) any later version. 
+# +# NEST is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with NEST. If not, see . + +from ..math import exp +from ..lib._hl_api_types import CreateParameter + +try: + import scipy.special + HAVE_SCIPY = True +except ImportError: + HAVE_SCIPY = False + + +__all__ = [ + 'exponential', + 'gaussian', + 'gaussian2D', + 'gamma', +] + + +def exponential(x, beta=1.0): + """ + Applies an exponential distribution on a Parameter. + + Parameters + ---------- + x : Parameter + Input Parameter. + beta : float, optional + Scale parameter. Default is 1.0. + + Returns + ------- + Parameter: + Object yielding values drawn from the distribution. + """ + return CreateParameter('exp_distribution', { + 'x': x, + 'beta': beta, + }) + + +def gaussian(x, mean=0.0, std=1.0): + """ + Applies a gaussian distribution on a Parameter. + + Parameters + ---------- + x : Parameter + Input Parameter. + mean : float, optional + Mean of the distribution. Default is 0.0. + std : float, optional + Standard deviation of the distribution. Default is 1.0. + + Returns + ------- + Parameter: + Object yielding values drawn from the distribution. + """ + return CreateParameter('gaussian', { + 'x': x, + 'mean': mean, + 'std': std, + }) + + +def gaussian2D(x, y, mean_x=0.0, mean_y=0.0, std_x=1.0, std_y=1.0, rho=0.0): + """ + Applies a bivariate gaussian distribution on two Parameters, representing values in the x and y direction. + + Parameters + ---------- + x : Parameter + Input Parameter for the x-direction. + y : Parameter + Input Parameter for the y-direction. + mean_x : float, optional + Mean of the distribution in the x-direction. Default is 0.0. + mean_y : float, optional + Mean of the distribution in the y-direction. Default is 0.0. + std_x : float, optional + Standard deviation of the distribution in the x-direction. Default is 1.0. + std_y : float, optional + Standard deviation of the distribution in the y-direction. Default is 1.0. + rho : float, optional + Correlation of x and y. Default is 0.0 + + Returns + ------- + Parameter: + Object yielding values drawn from the distribution. + """ + return CreateParameter('gaussian2d', { + 'x': x, + 'y': y, + 'mean_x': mean_x, + 'mean_y': mean_y, + 'std_x': std_x, + 'std_y': std_y, + 'rho': rho, + }) + + +def gamma(x, kappa=1.0, theta=1.0): + """ + Applies a gamma distribution on a Parameter. + + This function requires SciPy, and will raise an error if SciPy cannot be imported. + + Parameters + ---------- + x : Parameter + Input Parameter. + kappa : float, optional + Shape parameter. Default is 1.0. + theta : float, optional + Scale parameter. Default is 1.0. + + Returns + ------- + Parameter: + Object yielding values drawn from the distribution. 
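+
+    Example
+    -------
+    A sketch (assumes a spatially structured network, so that
+    ``nest.spatial.distance`` is defined)::
+
+        nest.spatial_distributions.gamma(nest.spatial.distance, kappa=2.0)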
+    """
+    return CreateParameter('gamma', {
+        'x': x,
+        'kappa': kappa,
+        'theta': theta
+    })
diff --git a/pynest/nestkernel_api.pyx b/pynest/nestkernel_api.pyx
index f4cffb88f9..925783f445 100644
--- a/pynest/nestkernel_api.pyx
+++ b/pynest/nestkernel_api.pyx
@@ -35,7 +35,7 @@ from libcpp.vector cimport vector
 import nest
 import numpy
 
-from nest.lib.hl_api_exceptions import NESTErrors
+from nest.lib._hl_api_exceptions import NESTErrors
 
 # cimport numpy
 

From 6ef360e2f29913b2d972814e07abc5a8d757ffbc Mon Sep 17 00:00:00 2001
From: Robin De Schepper
Date: Fri, 2 Dec 2022 11:51:23 +0100
Subject: [PATCH 02/17] Moved dead code to plot module

---
 pynest/nest/{ll_api.py => _ll_api.py} |   0
 pynest/nest/plot/__init__.py          |   0
 pynest/nest/plot/_raster_plot.py      | 355 ++++++++++++++++++
 pynest/nest/plot/_visualization.py    |  84 +++++
 .../_voltage_trace.py}                |   0
 pynest/nest/versionchecker.py.in      |  33 --
 6 files changed, 439 insertions(+), 33 deletions(-)
 rename pynest/nest/{ll_api.py => _ll_api.py} (100%)
 create mode 100644 pynest/nest/plot/__init__.py
 create mode 100644 pynest/nest/plot/_raster_plot.py
 create mode 100644 pynest/nest/plot/_visualization.py
 rename pynest/nest/{voltage_trace.py => plot/_voltage_trace.py} (100%)
 delete mode 100644 pynest/nest/versionchecker.py.in

diff --git a/pynest/nest/ll_api.py b/pynest/nest/_ll_api.py
similarity index 100%
rename from pynest/nest/ll_api.py
rename to pynest/nest/_ll_api.py
diff --git a/pynest/nest/plot/__init__.py b/pynest/nest/plot/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/pynest/nest/plot/_raster_plot.py b/pynest/nest/plot/_raster_plot.py
new file mode 100644
index 0000000000..4fbb0cdf1e
--- /dev/null
+++ b/pynest/nest/plot/_raster_plot.py
@@ -0,0 +1,355 @@
+# -*- coding: utf-8 -*-
+#
+# _raster_plot.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST.  If not, see <http://www.gnu.org/licenses/>.
+
+""" Functions for raster plotting."""
+
+import nest
+import numpy
+
+__all__ = [
+    'extract_events',
+    'from_data',
+    'from_device',
+    'from_file',
+    'from_file_numpy',
+    'from_file_pandas'
+]
+
+
+def extract_events(data, time=None, sel=None):
+    """Extract all events within a given time interval.
+
+    Both time and sel may be used at the same time such that all
+    events are extracted for which both conditions are true.
+
+    Parameters
+    ----------
+    data : list
+        Matrix such that
+        data[:,0] is a vector of all node_ids and
+        data[:,1] a vector with the corresponding time stamps.
+    time : list, optional
+        List with at most two entries such that
+        time=[t_max] extracts all events with t < t_max
+        time=[t_min, t_max] extracts all events with t_min <= t < t_max
+    sel : list, optional
+        List of node_ids such that
+        sel=[node_id1, ... , node_idn] extracts all events from these node_ids.
+        All others are discarded.
+ + Returns + ------- + numpy.array + List of events as (node_id, t) tuples + """ + val = [] + + if time: + t_max = time[-1] + if len(time) > 1: + t_min = time[0] + else: + t_min = 0 + + for v in data: + t = v[1] + node_id = v[0] + if time and (t < t_min or t >= t_max): + continue + if not sel or node_id in sel: + val.append(v) + + return numpy.array(val) + + +def from_data(data, sel=None, **kwargs): + """Plot raster plot from data array. + + Parameters + ---------- + data : list + Matrix such that + data[:,0] is a vector of all node_ids and + data[:,1] a vector with the corresponding time stamps. + sel : list, optional + List of node_ids such that + sel=[node_id1, ... , node_idn] extracts all events from these node_ids. + All others are discarded. + kwargs: + Parameters passed to _make_plot + """ + if len(data) == 0: + raise nest.kernel.NESTError("No data to plot.") + ts = data[:, 1] + d = extract_events(data, sel=sel) + ts1 = d[:, 1] + node_ids = d[:, 0] + + return _make_plot(ts, ts1, node_ids, data[:, 0], **kwargs) + + +def from_file(fname, **kwargs): + """Plot raster from file. + + Parameters + ---------- + fname : str or tuple(str) or list(str) + File name or list of file names + + If a list of files is given, the data from them is concatenated as if + it had been stored in a single file - useful when MPI is enabled and + data is logged separately for each MPI rank, for example. + kwargs: + Parameters passed to _make_plot + """ + if isinstance(fname, str): + fname = [fname] + + if isinstance(fname, (list, tuple)): + try: + global pandas + pandas = __import__('pandas') + from_file_pandas(fname, **kwargs) + except ImportError: + from_file_numpy(fname, **kwargs) + else: + print('fname should be one of str/list(str)/tuple(str).') + + +def from_file_pandas(fname, **kwargs): + """Use pandas.""" + data = None + for f in fname: + dataFrame = pandas.read_table(f, header=2, skipinitialspace=True) + newdata = dataFrame.values + + if data is None: + data = newdata + else: + data = numpy.concatenate((data, newdata)) + + return from_data(data, **kwargs) + + +def from_file_numpy(fname, **kwargs): + """Use numpy.""" + data = None + for f in fname: + newdata = numpy.loadtxt(f, skiprows=3) + + if data is None: + data = newdata + else: + data = numpy.concatenate((data, newdata)) + + return from_data(data, **kwargs) + + +def from_device(detec, **kwargs): + """ + Plot raster from a spike recorder. + + Parameters + ---------- + detec : TYPE + Description + kwargs: + Parameters passed to _make_plot + + Raises + ------ + nest.kernel.NESTError + """ + + type_id = nest.GetDefaults(detec.get('model'), 'type_id') + if not type_id == "spike_recorder": + raise nest.kernel.NESTError("Please provide a spike_recorder.") + + if detec.get('record_to') == "memory": + + ts, node_ids = _from_memory(detec) + + if not len(ts): + raise nest.kernel.NESTError("No events recorded!") + + if "title" not in kwargs: + kwargs["title"] = "Raster plot from device '%i'" % detec.get('global_id') + + if detec.get('time_in_steps'): + xlabel = "Steps" + else: + xlabel = "Time (ms)" + + return _make_plot(ts, ts, node_ids, node_ids, xlabel=xlabel, **kwargs) + + elif detec.get("record_to") == "ascii": + fname = detec.get("filenames") + return from_file(fname, **kwargs) + + else: + raise nest.kernel.NESTError("No data to plot. 
Make sure that \ + record_to is set to either 'ascii' or 'memory'.") + + +def _from_memory(detec): + ev = detec.get("events") + return ev["times"], ev["senders"] + + +def _make_plot(ts, ts1, node_ids, neurons, hist=True, hist_binwidth=5.0, + grayscale=False, title=None, xlabel=None): + """Generic plotting routine. + + Constructs a raster plot along with an optional histogram (common part in + all routines above). + + Parameters + ---------- + ts : list + All timestamps + ts1 : list + Timestamps corresponding to node_ids + node_ids : list + Global ids corresponding to ts1 + neurons : list + Node IDs of neurons to plot + hist : bool, optional + Display histogram + hist_binwidth : float, optional + Width of histogram bins + grayscale : bool, optional + Plot in grayscale + title : str, optional + Plot title + xlabel : str, optional + Label for x-axis + """ + import matplotlib.pyplot as plt + + plt.figure() + + if grayscale: + color_marker = ".k" + color_bar = "gray" + else: + color_marker = "." + color_bar = "blue" + + color_edge = "black" + + if xlabel is None: + xlabel = "Time (ms)" + + ylabel = "Neuron ID" + + if hist: + ax1 = plt.axes([0.1, 0.3, 0.85, 0.6]) + plotid = plt.plot(ts1, node_ids, color_marker) + plt.ylabel(ylabel) + plt.xticks([]) + xlim = plt.xlim() + + plt.axes([0.1, 0.1, 0.85, 0.17]) + t_bins = numpy.arange( + numpy.amin(ts), numpy.amax(ts), + float(hist_binwidth) + ) + n, _ = _histogram(ts, bins=t_bins) + num_neurons = len(numpy.unique(neurons)) + heights = 1000 * n / (hist_binwidth * num_neurons) + + plt.bar(t_bins, heights, width=hist_binwidth, color=color_bar, + edgecolor=color_edge) + plt.yticks([ + int(x) for x in + numpy.linspace(0.0, int(max(heights) * 1.1) + 5, 4) + ]) + plt.ylabel("Rate (Hz)") + plt.xlabel(xlabel) + plt.xlim(xlim) + plt.axes(ax1) + else: + plotid = plt.plot(ts1, node_ids, color_marker) + plt.xlabel(xlabel) + plt.ylabel(ylabel) + + if title is None: + plt.title("Raster plot") + else: + plt.title(title) + + plt.draw() + + return plotid + + +def _histogram(a, bins=10, bin_range=None, normed=False): + """Calculate histogram for data. 
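+
+    Values are counted into the given bins block by block; entries below the
+    first bin edge are discarded, while entries at or above the last edge are
+    counted into the last bin.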
+
+    Parameters
+    ----------
+    a : list
+        Data to calculate histogram for
+    bins : int, optional
+        Number of bins
+    bin_range : tuple of two floats, optional
+        Lower and upper bound of the bins
+    normed : bool, optional
+        Whether distribution should be normalized
+
+    Raises
+    ------
+    ValueError
+    """
+    from numpy import asarray, iterable, linspace, sort, concatenate
+
+    a = asarray(a).ravel()
+
+    if bin_range is not None:
+        mn, mx = bin_range
+        if mn > mx:
+            raise ValueError("max must be larger than min in range parameter")
+
+    if not iterable(bins):
+        if bin_range is None:
+            bin_range = (a.min(), a.max())
+        mn, mx = [mi + 0.0 for mi in bin_range]
+        if mn == mx:
+            mn -= 0.5
+            mx += 0.5
+        bins = linspace(mn, mx, bins, endpoint=False)
+    else:
+        if (bins[1:] - bins[:-1] < 0).any():
+            raise ValueError("bins must increase monotonically")
+
+    # best block size probably depends on processor cache size
+    block = 65536
+    n = sort(a[:block]).searchsorted(bins)
+    for i in range(block, a.size, block):
+        n += sort(a[i:i + block]).searchsorted(bins)
+    n = concatenate([n, [len(a)]])
+    n = n[1:] - n[:-1]
+
+    if normed:
+        db = bins[1] - bins[0]
+        return 1.0 / (a.size * db) * n, bins
+    else:
+        return n, bins
diff --git a/pynest/nest/plot/_visualization.py b/pynest/nest/plot/_visualization.py
new file mode 100644
index 0000000000..66eccd42e6
--- /dev/null
+++ b/pynest/nest/plot/_visualization.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+#
+# _visualization.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Functions to visualize a network built in NEST.
+"""
+
+import pydot
+
+import nest
+
+__all__ = [
+    'plot_network',
+]
+
+
+def plot_network(nodes, filename, ext_conns=False,
+                 plot_modelnames=False):
+    """Plot the given nodes and the connections that originate from
+    them.
+
+    This function depends on the availability of the pydot module.
+
+    Simplified version for NEST 3.
+
+    Parameters
+    ----------
+    nodes : NodeCollection
+        NodeCollection containing node IDs of nodes to plot
+    filename : str
+        Filename to save the plot to. Can end either in .pdf or .png to
+        determine the type of the output.
+    ext_conns : bool, optional
+        Draw connections to targets that are not in nodes. If it is True,
+        these are drawn to a node named 'ext'.
+    plot_modelnames : bool, optional
+        Whether to annotate each node with its model name (not implemented
+        in this simplified version).
+
+    Raises
+    ------
+    nest.kernel.NESTError
+    """
+
+    if len(nodes) == 0:
+        raise nest.kernel.NESTError("nodes must at least contain one node")
+
+    if not isinstance(nodes, nest.NodeCollection):
+        raise nest.kernel.NESTError("nodes must be a NodeCollection")
+
+    if ext_conns:
+        raise NotImplementedError('ext_conns')
+    if plot_modelnames:
+        raise NotImplementedError('plot_modelnames')
+
+    conns = nest.GetConnections(nodes)
+
+    graph = pydot.Dot(rankdir='LR', ranksep='5')
+    for source, target in zip(conns.sources(), conns.targets()):
+        graph.add_edge(pydot.Edge(str(source), str(target)))
+
+    filetype = filename.rsplit(".", 1)[1]
+    if filetype == "pdf":
+        graph.write_pdf(filename)
+    elif filetype == "png":
+        graph.write_png(filename)
+    else:
+        raise nest.kernel.NESTError("Filename must end in '.png' or '.pdf'.")
diff --git a/pynest/nest/voltage_trace.py b/pynest/nest/plot/_voltage_trace.py
similarity index 100%
rename from pynest/nest/voltage_trace.py
rename to pynest/nest/plot/_voltage_trace.py
diff --git a/pynest/nest/versionchecker.py.in b/pynest/nest/versionchecker.py.in
deleted file mode 100644
index 0d71dde2aa..0000000000
--- a/pynest/nest/versionchecker.py.in
+++ /dev/null
@@ -1,33 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# versionchecker.py
-#
-# This file is part of NEST.
-#
-# Copyright (C) 2004 The NEST Initiative
-#
-# NEST is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or
-# (at your option) any later version.
-#
-# NEST is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with NEST.  If not, see <http://www.gnu.org/licenses/>.
-
-
-"""
-Check that the Python compiletime and runtime versions match.
-
-"""
-
-v_major_mismatch = sys.version_info.major != @Python_VERSION_MAJOR@
-v_minor_mismatch = sys.version_info.minor != @Python_VERSION_MINOR@
-if v_major_mismatch or v_minor_mismatch:
-    msg = ("Python runtime version does not match 'nest' compiletime version.
" - + "Please use Python @Python_VERSION_MAJOR@.@Python_VERSION_MINOR@.") - raise Exception(msg) From 7392140c8d76ad2ef72369161649a3b61d3b23cb Mon Sep 17 00:00:00 2001 From: Robin De Schepper Date: Fri, 2 Dec 2022 12:06:19 +0100 Subject: [PATCH 03/17] Completed "privatization" of pynest --- CMakeLists.txt | 6 - pynest/CMakeLists.txt | 1 - pynest/nest/__init__.py | 5 +- pynest/nest/_ll_api.py | 7 +- pynest/nest/lib/_hl_api_connection_helpers.py | 198 ++++++++++++------ pynest/nest/lib/_hl_api_connections.py | 117 +++++++---- pynest/nest/lib/_hl_api_models.py | 22 +- pynest/nest/lib/_hl_api_nodes.py | 50 +++-- pynest/nest/lib/_hl_api_parallel_computing.py | 20 +- pynest/nest/lib/_hl_api_simulation.py | 47 +++-- pynest/nest/lib/_hl_api_types.py | 2 +- pynest/nest/plot/__init__.py | 26 +++ pynest/nest/plot/_raster_plot.py | 124 ++++++----- pynest/nest/plot/_visualization.py | 15 +- pynest/nest/plot/_voltage_trace.py | 3 +- testsuite/pytests/test_tsodyks2_synapse.py | 4 +- testsuite/pytests/test_urbanczik_synapse.py | 9 +- testsuite/pytests/test_visualization.py | 14 +- 18 files changed, 408 insertions(+), 262 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ce789b8a52..c1b1db0eaf 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -342,12 +342,6 @@ configure_file( "${PROJECT_BINARY_DIR}/doc/fulldoc.conf" @ONLY ) -configure_file( - "${PROJECT_SOURCE_DIR}/pynest/nest/versionchecker.py.in" - "${PROJECT_BINARY_DIR}/pynest/nest/versionchecker.py" @ONLY -) - - ################################################################################ ################## Install Extra Files ################## ################################################################################ diff --git a/pynest/CMakeLists.txt b/pynest/CMakeLists.txt index 57bcd92708..8f2b696c7d 100644 --- a/pynest/CMakeLists.txt +++ b/pynest/CMakeLists.txt @@ -50,7 +50,6 @@ if ( HAVE_PYTHON ) install(DIRECTORY nest/ ${PROJECT_BINARY_DIR}/pynest/nest/ DESTINATION ${CMAKE_INSTALL_PREFIX}/${PYEXECDIR}/nest - PATTERN "versionchecker.py.in" EXCLUDE ) install( TARGETS nestkernel_api DESTINATION ${PYEXECDIR}/nest/ ) diff --git a/pynest/nest/__init__.py b/pynest/nest/__init__.py index c0b91b3bbb..54999d19ca 100644 --- a/pynest/nest/__init__.py +++ b/pynest/nest/__init__.py @@ -61,14 +61,13 @@ import sys # noqa import types # noqa -from .ll_api import KernelAttribute # noqa +from ._ll_api import KernelAttribute # noqa try: import versionchecker # noqa: F401 except ImportError: pass - class NestModule(types.ModuleType): """ A module class for the ``nest`` root module to control the dynamic generation @@ -81,7 +80,7 @@ class NestModule(types.ModuleType): from . import math # noqa from . import random # noqa from . import spatial_distributions # noqa - from .ll_api import set_communicator + from ._ll_api import set_communicator def __init__(self, name): super().__init__(name) diff --git a/pynest/nest/_ll_api.py b/pynest/nest/_ll_api.py index 9eed5b04ae..a3053d6a9b 100644 --- a/pynest/nest/_ll_api.py +++ b/pynest/nest/_ll_api.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# ll_api.py +# _ll_api.py # # This file is part of NEST. # @@ -51,7 +51,7 @@ sys.setdlopenflags(os.RTLD_NOW | os.RTLD_GLOBAL) from . 
import nestkernel_api as nestkernel # noqa -from .lib.hl_api_exceptions import NESTError, NESTErrors +from .lib._hl_api_exceptions import NESTError, NESTErrors __all__ = [ "set_communicator", @@ -76,7 +76,7 @@ def set_communicator(comm): """ if "mpi4py" not in sys.modules: - raise NESTError("set_communicator: " "mpi4py not loaded.") + raise NESTError("set_communicator: mpi4py not loaded.") # TODO-PYNEST-NG: set_communicator # engine.set_communicator(comm) @@ -181,6 +181,7 @@ def init(argv): pass else: from .lib._hl_api_simulation import GetKernelStatus # noqa + keyword_lists = ( "connection_rules", "node_models", diff --git a/pynest/nest/lib/_hl_api_connection_helpers.py b/pynest/nest/lib/_hl_api_connection_helpers.py index a19058f934..c879f76d13 100644 --- a/pynest/nest/lib/_hl_api_connection_helpers.py +++ b/pynest/nest/lib/_hl_api_connection_helpers.py @@ -27,18 +27,18 @@ import copy import numpy as np -from ..ll_api import * +from .._ll_api import * from .. import pynestkernel as kernel from .. import nestkernel_api as nestkernel from ._hl_api_types import CollocatedSynapses, Mask, NodeCollection, Parameter from ._hl_api_exceptions import NESTErrors __all__ = [ - '_connect_layers_needed', - '_connect_spatial', - '_process_conn_spec', - '_process_spatial_projections', - '_process_syn_spec', + "_connect_layers_needed", + "_connect_spatial", + "_process_conn_spec", + "_process_spatial_projections", + "_process_syn_spec", ] @@ -46,9 +46,9 @@ def _process_conn_spec(conn_spec): """Processes the connectivity specifications from None, string or dictionary to a dictionary.""" if conn_spec is None: # Use default conn_spec - return {'rule': 'all_to_all'} + return {"rule": "all_to_all"} elif isinstance(conn_spec, str): - processed_conn_spec = {'rule': conn_spec} + processed_conn_spec = {"rule": conn_spec} return processed_conn_spec elif isinstance(conn_spec, dict): return conn_spec @@ -72,9 +72,11 @@ def _process_syn_spec(syn_spec, conn_spec, prelength, postlength, use_connect_ar if isinstance(syn_spec, str): return {"synapse_model": syn_spec} - rule = conn_spec['rule'] + rule = conn_spec["rule"] if isinstance(syn_spec, dict): - if "synapse_model" in syn_spec and not isinstance(syn_spec["synapse_model"], str): + if "synapse_model" in syn_spec and not isinstance( + syn_spec["synapse_model"], str + ): raise kernel.NESTError("'synapse_model' must be a string") for key, value in syn_spec.items(): # if value is a list, it is converted to a numpy array @@ -83,59 +85,73 @@ def _process_syn_spec(syn_spec, conn_spec, prelength, postlength, use_connect_ar if isinstance(value, (np.ndarray, np.generic)): if len(value.shape) == 1: - if rule == 'one_to_one': + if rule == "one_to_one": if value.shape[0] != prelength: if use_connect_arrays: raise kernel.NESTError( - "'{}' has to be an array of dimension {}.".format(key, prelength)) + "'{}' has to be an array of dimension {}.".format( + key, prelength + ) + ) else: raise kernel.NESTError( "'{}' has to be an array of dimension {}, a scalar or a dictionary.".format( - key, prelength)) + key, prelength + ) + ) else: syn_spec[key] = value - elif rule == 'fixed_total_number': - if ('N' in conn_spec and value.shape[0] != conn_spec['N']): + elif rule == "fixed_total_number": + if "N" in conn_spec and value.shape[0] != conn_spec["N"]: raise kernel.NESTError( "'{}' has to be an array of dimension {}, a scalar or a dictionary".format( - key, conn_spec['N'])) + key, conn_spec["N"] + ) + ) else: syn_spec[key] = value else: raise kernel.NESTError( "'{}' has the wrong type. 
One-dimensional parameter arrays can only be used in " - "conjunction with rule 'one_to_one' or 'fixed_total_number'.".format(key)) + "conjunction with rule 'one_to_one' or 'fixed_total_number'.".format( + key + ) + ) elif len(value.shape) == 2: - if rule == 'all_to_all': + if rule == "all_to_all": if value.shape[0] != postlength or value.shape[1] != prelength: raise kernel.NESTError( "'{}' has to be an array of dimension {}x{} (n_target x n_sources), a scalar " - "or a dictionary.".format(key, postlength, prelength)) + "or a dictionary.".format(key, postlength, prelength) + ) else: syn_spec[key] = value.flatten() - elif rule == 'fixed_indegree': - indegree = conn_spec['indegree'] - if value.shape[0] != postlength or \ - value.shape[1] != indegree: + elif rule == "fixed_indegree": + indegree = conn_spec["indegree"] + if value.shape[0] != postlength or value.shape[1] != indegree: raise kernel.NESTError( "'{}' has to be an array of dimension {}x{} (n_target x indegree), a scalar " - "or a dictionary.".format(key, postlength, indegree)) + "or a dictionary.".format(key, postlength, indegree) + ) else: syn_spec[key] = value.flatten() - elif rule == 'fixed_outdegree': - outdegree = conn_spec['outdegree'] - if value.shape[0] != prelength or \ - value.shape[1] != outdegree: + elif rule == "fixed_outdegree": + outdegree = conn_spec["outdegree"] + if value.shape[0] != prelength or value.shape[1] != outdegree: raise kernel.NESTError( "'{}' has to be an array of dimension {}x{} (n_sources x outdegree), a scalar " - "or a dictionary.".format(key, prelength, outdegree)) + "or a dictionary.".format(key, prelength, outdegree) + ) else: syn_spec[key] = value.flatten() else: raise kernel.NESTError( "'{}' has the wrong type. Two-dimensional parameter arrays can only be used in " - "conjunction with rules 'all_to_all', 'fixed_indegree' or fixed_outdegree'.".format(key)) + "conjunction with rules 'all_to_all', 'fixed_indegree' or fixed_outdegree'.".format( + key + ) + ) # check that "synapse_model" is there for use_connect_arrays if use_connect_arrays and "synapse_model" not in syn_spec: @@ -152,53 +168,84 @@ def _process_spatial_projections(conn_spec, syn_spec): Processes the connection and synapse specifications to a single dictionary for the SLI function `ConnectLayers`. 
""" - allowed_conn_spec_keys = ['mask', 'allow_multapses', 'allow_autapses', 'rule', - 'indegree', 'outdegree', 'p', 'use_on_source', 'allow_oversized_mask'] - allowed_syn_spec_keys = ['weight', 'delay', 'synapse_model', 'synapse_label', 'receptor_type'] + allowed_conn_spec_keys = [ + "mask", + "allow_multapses", + "allow_autapses", + "rule", + "indegree", + "outdegree", + "p", + "use_on_source", + "allow_oversized_mask", + ] + allowed_syn_spec_keys = [ + "weight", + "delay", + "synapse_model", + "synapse_label", + "receptor_type", + ] for key in conn_spec.keys(): if key not in allowed_conn_spec_keys: - raise ValueError("'{}' is not allowed in conn_spec when connecting with mask or kernel".format(key)) + raise ValueError( + "'{}' is not allowed in conn_spec when connecting with mask or kernel".format( + key + ) + ) projections = {} projections.update(conn_spec) - if 'p' in conn_spec: - projections['kernel'] = projections.pop('p') + if "p" in conn_spec: + projections["kernel"] = projections.pop("p") if syn_spec is not None: if isinstance(syn_spec, CollocatedSynapses): for syn_list in syn_spec.syn_specs: for key in syn_list.keys(): if key not in allowed_syn_spec_keys: raise ValueError( - "'{}' is not allowed in syn_spec when connecting with mask or kernel".format(key)) - projections.update({'synapse_parameters': syn_spec.syn_specs}) + "'{}' is not allowed in syn_spec when connecting with mask or kernel".format( + key + ) + ) + projections.update({"synapse_parameters": syn_spec.syn_specs}) else: for key in syn_spec.keys(): if key not in allowed_syn_spec_keys: - raise ValueError("'{}' is not allowed in syn_spec when connecting with mask or kernel".format(key)) + raise ValueError( + "'{}' is not allowed in syn_spec when connecting with mask or kernel".format( + key + ) + ) projections.update(syn_spec) - if conn_spec['rule'] == 'fixed_indegree': - if 'use_on_source' in conn_spec: - raise ValueError("'use_on_source' can only be set when using pairwise_bernoulli") - projections['connection_type'] = 'pairwise_bernoulli_on_source' - projections['number_of_connections'] = projections.pop('indegree') - elif conn_spec['rule'] == 'fixed_outdegree': - if 'use_on_source' in conn_spec: - raise ValueError("'use_on_source' can only be set when using pairwise_bernoulli") - projections['connection_type'] = 'pairwise_bernoulli_on_target' - projections['number_of_connections'] = projections.pop('outdegree') - elif conn_spec['rule'] == 'pairwise_bernoulli': - if ('use_on_source' in conn_spec and - conn_spec['use_on_source']): - projections['connection_type'] = 'pairwise_bernoulli_on_source' - projections.pop('use_on_source') + if conn_spec["rule"] == "fixed_indegree": + if "use_on_source" in conn_spec: + raise ValueError( + "'use_on_source' can only be set when using pairwise_bernoulli" + ) + projections["connection_type"] = "pairwise_bernoulli_on_source" + projections["number_of_connections"] = projections.pop("indegree") + elif conn_spec["rule"] == "fixed_outdegree": + if "use_on_source" in conn_spec: + raise ValueError( + "'use_on_source' can only be set when using pairwise_bernoulli" + ) + projections["connection_type"] = "pairwise_bernoulli_on_target" + projections["number_of_connections"] = projections.pop("outdegree") + elif conn_spec["rule"] == "pairwise_bernoulli": + if "use_on_source" in conn_spec and conn_spec["use_on_source"]: + projections["connection_type"] = "pairwise_bernoulli_on_source" + projections.pop("use_on_source") else: - projections['connection_type'] = 'pairwise_bernoulli_on_target' - if 
'use_on_source' in projections: - projections.pop('use_on_source') + projections["connection_type"] = "pairwise_bernoulli_on_target" + if "use_on_source" in projections: + projections.pop("use_on_source") else: - raise kernel.NESTError("When using kernel or mask, the only possible connection rules are " - "'pairwise_bernoulli', 'fixed_indegree', or 'fixed_outdegree'") - projections.pop('rule') + raise kernel.NESTError( + "When using kernel or mask, the only possible connection rules are " + "'pairwise_bernoulli', 'fixed_indegree', or 'fixed_outdegree'" + ) + projections.pop("rule") return projections @@ -210,10 +257,12 @@ def _connect_layers_needed(conn_spec, syn_spec): if isinstance(item, Parameter) and item.is_spatial(): return True # We must use ConnectLayers in some additional cases. - rule_is_bernoulli = 'pairwise_bernoulli' in str(conn_spec['rule']) - if ('mask' in conn_spec or - ('p' in conn_spec and not rule_is_bernoulli) or - 'use_on_source' in conn_spec): + rule_is_bernoulli = "pairwise_bernoulli" in str(conn_spec["rule"]) + if ( + "mask" in conn_spec + or ("p" in conn_spec and not rule_is_bernoulli) + or "use_on_source" in conn_spec + ): return True # If a syn_spec entry is based on spatial properties, we must use ConnectLayers. if isinstance(syn_spec, dict): @@ -221,7 +270,12 @@ def _connect_layers_needed(conn_spec, syn_spec): if isinstance(item, Parameter) and item.is_spatial(): return True elif isinstance(syn_spec, CollocatedSynapses): - return any([_connect_layers_needed(conn_spec, syn_param) for syn_param in syn_spec.syn_specs]) + return any( + [ + _connect_layers_needed(conn_spec, syn_param) + for syn_param in syn_spec.syn_specs + ] + ) # If we get here, there is not need to use ConnectLayers. return False @@ -258,8 +312,11 @@ def _process_input_nodes(pre, post, conn_spec): use_connect_arrays = False # check for 'one_to_one' conn_spec - one_to_one_cspec = (conn_spec if not isinstance(conn_spec, dict) - else conn_spec.get('rule', 'all_to_all') == 'one_to_one') + one_to_one_cspec = ( + conn_spec + if not isinstance(conn_spec, dict) + else conn_spec.get("rule", "all_to_all") == "one_to_one" + ) # check and convert input types pre_is_nc, post_is_nc = True, True @@ -282,7 +339,8 @@ def _process_input_nodes(pre, post, conn_spec): if len(pre) != len(post): raise NESTErrors.ArgumentType( "Connect", - "If `pre` or `post` contain non-unique IDs, then they must have the same length.") + "If `pre` or `post` contain non-unique IDs, then they must have the same length.", + ) # convert to arrays pre = np.asarray(pre) @@ -302,6 +360,8 @@ def _process_input_nodes(pre, post, conn_spec): use_connect_arrays = True if use_connect_arrays and not one_to_one_cspec: - raise ValueError("When connecting two arrays with non-unique IDs, `conn_spec` must be 'one_to_one'.") + raise ValueError( + "When connecting two arrays with non-unique IDs, `conn_spec` must be 'one_to_one'." + ) return use_connect_arrays, pre, post diff --git a/pynest/nest/lib/_hl_api_connections.py b/pynest/nest/lib/_hl_api_connections.py index 16b2e02444..17f7ba3909 100644 --- a/pynest/nest/lib/_hl_api_connections.py +++ b/pynest/nest/lib/_hl_api_connections.py @@ -25,26 +25,30 @@ import numpy -from ..ll_api import connect_arrays +from .._ll_api import connect_arrays from .. import pynestkernel as kernel from .. 
import nestkernel_api as nestkernel -from ._hl_api_connection_helpers import (_process_input_nodes, _connect_layers_needed, - _connect_spatial, _process_conn_spec, - _process_spatial_projections, _process_syn_spec) +from ._hl_api_connection_helpers import ( + _process_input_nodes, + _connect_layers_needed, + _connect_spatial, + _process_conn_spec, + _process_spatial_projections, + _process_syn_spec, +) from ._hl_api_nodes import Create from ._hl_api_parallel_computing import NumProcesses from ._hl_api_types import NodeCollection, SynapseCollection, Mask, Parameter __all__ = [ - 'Connect', - 'Disconnect', - 'GetConnections', + "Connect", + "Disconnect", + "GetConnections", ] -def GetConnections(source=None, target=None, synapse_model=None, - synapse_label=None): +def GetConnections(source=None, target=None, synapse_model=None, synapse_label=None): """Return a `SynapseCollection` representing the connection identifiers. Any combination of `source`, `target`, `synapse_model` and @@ -83,29 +87,28 @@ def GetConnections(source=None, target=None, synapse_model=None, if source is not None: if isinstance(source, NodeCollection): - params['source'] = source + params["source"] = source else: raise TypeError("source must be NodeCollection.") if target is not None: if isinstance(target, NodeCollection): - params['target'] = target + params["target"] = target else: raise TypeError("target must be NodeCollection.") if synapse_model is not None: - params['synapse_model'] = synapse_model + params["synapse_model"] = synapse_model if synapse_label is not None: - params['synapse_label'] = synapse_label + params["synapse_label"] = synapse_label conns = nestkernel.llapi_get_connections(params) return conns -def Connect(pre, post, conn_spec=None, syn_spec=None, - return_synapsecollection=False): +def Connect(pre, post, conn_spec=None, syn_spec=None, return_synapsecollection=False): """ Connect `pre` nodes to `post` nodes. @@ -203,17 +206,22 @@ def Connect(pre, post, conn_spec=None, syn_spec=None, # If syn_spec is given, its contents are checked, and if needed converted # to the right formats. processed_syn_spec = _process_syn_spec( - syn_spec, processed_conn_spec, len(pre), len(post), use_connect_arrays) + syn_spec, processed_conn_spec, len(pre), len(post), use_connect_arrays + ) # If pre and post are arrays of node IDs, and conn_spec is unspecified, # the node IDs are connected one-to-one. if use_connect_arrays: if return_synapsecollection: - raise ValueError("SynapseCollection cannot be returned when connecting two arrays of node IDs") + raise ValueError( + "SynapseCollection cannot be returned when connecting two arrays of node IDs" + ) if processed_syn_spec is None: - raise ValueError("When connecting two arrays of node IDs, the synapse specification dictionary must " - "be specified and contain at least the synapse model.") + raise ValueError( + "When connecting two arrays of node IDs, the synapse specification dictionary must " + "be specified and contain at least the synapse model." 
+ ) # In case of misspelling if "weights" in processed_syn_spec: @@ -221,22 +229,37 @@ def Connect(pre, post, conn_spec=None, syn_spec=None, if "delays" in processed_syn_spec: raise ValueError("To specify delays, use 'delay' in syn_spec.") - weights = numpy.array(processed_syn_spec['weight']) if 'weight' in processed_syn_spec else None - delays = numpy.array(processed_syn_spec['delay']) if 'delay' in processed_syn_spec else None + weights = ( + numpy.array(processed_syn_spec["weight"]) + if "weight" in processed_syn_spec + else None + ) + delays = ( + numpy.array(processed_syn_spec["delay"]) + if "delay" in processed_syn_spec + else None + ) try: - synapse_model = processed_syn_spec['synapse_model'] + synapse_model = processed_syn_spec["synapse_model"] except KeyError: - raise ValueError("When connecting two arrays of node IDs, the synapse specification dictionary must " - "contain a synapse model.") + raise ValueError( + "When connecting two arrays of node IDs, the synapse specification dictionary must " + "contain a synapse model." + ) # Split remaining syn_spec entries to key and value arrays - reduced_processed_syn_spec = {k: processed_syn_spec[k] - for k in set(processed_syn_spec.keys()).difference( - set(('weight', 'delay', 'synapse_model')))} + reduced_processed_syn_spec = { + k: processed_syn_spec[k] + for k in set(processed_syn_spec.keys()).difference( + set(("weight", "delay", "synapse_model")) + ) + } if len(reduced_processed_syn_spec) > 0: - syn_param_keys = numpy.array(list(reduced_processed_syn_spec.keys()), dtype=numpy.string_) + syn_param_keys = numpy.array( + list(reduced_processed_syn_spec.keys()), dtype=numpy.string_ + ) syn_param_values = numpy.zeros([len(reduced_processed_syn_spec), len(pre)]) for i, value in enumerate(reduced_processed_syn_spec.values()): @@ -245,7 +268,9 @@ def Connect(pre, post, conn_spec=None, syn_spec=None, syn_param_keys = None syn_param_values = None - connect_arrays(pre, post, weights, delays, synapse_model, syn_param_keys, syn_param_values) + connect_arrays( + pre, post, weights, delays, synapse_model, syn_param_keys, syn_param_values + ) return if not isinstance(pre, NodeCollection): @@ -262,10 +287,14 @@ def Connect(pre, post, conn_spec=None, syn_spec=None, raise TypeError("Presynaptic NodeCollection must have spatial information") # Create the projection dictionary - spatial_projections = _process_spatial_projections(processed_conn_spec, processed_syn_spec) + spatial_projections = _process_spatial_projections( + processed_conn_spec, processed_syn_spec + ) _connect_spatial(pre._datum, post._datum, spatial_projections) else: - nestkernel.llapi_connect(pre._datum, post._datum, processed_conn_spec, processed_syn_spec) + nestkernel.llapi_connect( + pre._datum, post._datum, processed_conn_spec, processed_syn_spec + ) if return_synapsecollection: return GetConnections(pre, post) @@ -335,25 +364,33 @@ def Disconnect(*args, conn_spec=None, syn_spec=None): if len(args) == 1: synapsecollection = args[0] if not isinstance(synapsecollection, SynapseCollection): - raise TypeError('Arguments must be either a SynapseCollection or two NodeCollections') + raise TypeError( + "Arguments must be either a SynapseCollection or two NodeCollections" + ) if conn_spec is not None or syn_spec is not None: - raise ValueError('When disconnecting with a SynapseCollection, conn_spec and syn_spec cannot be specified') + raise ValueError( + "When disconnecting with a SynapseCollection, conn_spec and syn_spec cannot be specified" + ) synapsecollection.disconnect() elif 
len(args) == 2: # Fill default values - conn_spec = 'one_to_one' if conn_spec is None else conn_spec - syn_spec = 'static_synapse' if syn_spec is None else syn_spec + conn_spec = "one_to_one" if conn_spec is None else conn_spec + syn_spec = "static_synapse" if syn_spec is None else syn_spec if is_string(conn_spec): - conn_spec = {'rule': conn_spec} + conn_spec = {"rule": conn_spec} if is_string(syn_spec): - syn_spec = {'synapse_model': syn_spec} + syn_spec = {"synapse_model": syn_spec} pre, post = args if not isinstance(pre, NodeCollection) or not isinstance(post, NodeCollection): - raise TypeError('Arguments must be either a SynapseCollection or two NodeCollections') + raise TypeError( + "Arguments must be either a SynapseCollection or two NodeCollections" + ) sps(pre) sps(post) sps(conn_spec) sps(syn_spec) - sr('Disconnect_g_g_D_D') + sr("Disconnect_g_g_D_D") else: - raise TypeError('Arguments must be either a SynapseCollection or two NodeCollections') + raise TypeError( + "Arguments must be either a SynapseCollection or two NodeCollections" + ) diff --git a/pynest/nest/lib/_hl_api_models.py b/pynest/nest/lib/_hl_api_models.py index ab7c324b6d..468bc7c7dc 100644 --- a/pynest/nest/lib/_hl_api_models.py +++ b/pynest/nest/lib/_hl_api_models.py @@ -23,17 +23,17 @@ Functions for model handling """ -from ..ll_api import * +from .._ll_api import * from .. import nestkernel_api as nestkernel from ._hl_api_helper import deprecated, is_iterable, model_deprecation_warning from ._hl_api_types import to_json __all__ = [ - 'ConnectionRules', - 'CopyModel', - 'GetDefaults', - 'Models', - 'SetDefaults', + "ConnectionRules", + "CopyModel", + "GetDefaults", + "Models", + "SetDefaults", ] @@ -76,10 +76,10 @@ def Models(mtype="all", sel=None): models = [] if mtype in ("all", "nodes"): - models += GetKernelStatus('node_models') + models += GetKernelStatus("node_models") if mtype in ("all", "synapses"): - models += GetKernelStatus('synapse_models') + models += GetKernelStatus("synapse_models") if sel is not None: models = [x for x in models if sel in x] @@ -101,7 +101,7 @@ def ConnectionRules(): """ - return tuple(sorted(GetKernelStatus('connection_rules'))) + return tuple(sorted(GetKernelStatus("connection_rules"))) @check_stack @@ -130,7 +130,7 @@ def SetDefaults(model, params, val=None): @check_stack -def GetDefaults(model, keys=None, output=''): +def GetDefaults(model, keys=None, output=""): """Return defaults of the given model or recording backend. Parameters @@ -171,7 +171,7 @@ def GetDefaults(model, keys=None, output=''): else: result = result[keys] - if output == 'json': + if output == "json": result = to_json(result) return result diff --git a/pynest/nest/lib/_hl_api_nodes.py b/pynest/nest/lib/_hl_api_nodes.py index 2a4642a0a5..8da5ed0bce 100644 --- a/pynest/nest/lib/_hl_api_nodes.py +++ b/pynest/nest/lib/_hl_api_nodes.py @@ -26,17 +26,17 @@ import warnings import nest -from ..ll_api import * +from .._ll_api import * from .. import pynestkernel as kernel from .. 
import nestkernel_api as nestkernel from ._hl_api_helper import is_iterable, model_deprecation_warning from ._hl_api_types import NodeCollection, Parameter __all__ = [ - 'Create', - 'GetLocalNodeCollection', - 'GetNodes', - 'PrintNodes', + "Create", + "GetLocalNodeCollection", + "GetNodes", + "PrintNodes", ] @@ -94,11 +94,13 @@ def Create(model, n=1, params=None, positions=None): iterable_or_parameter_in_params = True if not isinstance(n, int): - raise TypeError('n must be an integer') + raise TypeError("n must be an integer") # PYNEST-NG: can we support the usecase above by passing the dict into ll_create? if isinstance(params, dict) and params: # if params is a dict and not empty - iterable_or_parameter_in_params = any(is_iterable(v) or isinstance(v, Parameter) for k, v in params.items()) + iterable_or_parameter_in_params = any( + is_iterable(v) or isinstance(v, Parameter) for k, v in params.items() + ) if positions is not None: # Explicitly retrieve lazy loaded spatial property from the module class. @@ -106,23 +108,27 @@ def Create(model, n=1, params=None, positions=None): spatial = getattr(nest.NestModule, "spatial") # We only accept positions as either a free object or a grid object. if not isinstance(positions, (spatial.free, spatial.grid)): - raise TypeError('`positions` must be either a nest.spatial.free or a nest.spatial.grid object') - layer_specs = {'elements': model} - layer_specs['edge_wrap'] = positions.edge_wrap + raise TypeError( + "`positions` must be either a nest.spatial.free or a nest.spatial.grid object" + ) + layer_specs = {"elements": model} + layer_specs["edge_wrap"] = positions.edge_wrap if isinstance(positions, spatial.free): - layer_specs['positions'] = positions.pos + layer_specs["positions"] = positions.pos # If the positions are based on a parameter object, the number of nodes must be specified. if isinstance(positions.pos, Parameter): - layer_specs['n'] = n + layer_specs["n"] = n else: # If positions is not a free object, it must be a grid object. if n > 1: - raise kernel.NESTError('Cannot specify number of nodes with grid positions') - layer_specs['shape'] = positions.shape + raise kernel.NESTError( + "Cannot specify number of nodes with grid positions" + ) + layer_specs["shape"] = positions.shape if positions.center is not None: - layer_specs['center'] = positions.center + layer_specs["center"] = positions.center if positions.extent is not None: - layer_specs['extent'] = positions.extent + layer_specs["extent"] = positions.extent layer = nestkernel.llapi_create_spatial(layer_specs) layer.set(params if params else {}) @@ -134,8 +140,10 @@ def Create(model, n=1, params=None, positions=None): try: node_ids.set(params) except Exception: - warnings.warn("Setting node parameters failed, but nodes have already been " + - f"created! The node IDs of the new nodes are: {node_ids}.") + warnings.warn( + "Setting node parameters failed, but nodes have already been " + + f"created! The node IDs of the new nodes are: {node_ids}." 
+ ) raise return node_ids @@ -191,13 +199,15 @@ def GetLocalNodeCollection(nc): Object representing the local nodes of the given `NodeCollection` """ if not isinstance(nc, NodeCollection): - raise TypeError("GetLocalNodeCollection requires a NodeCollection in order to run") + raise TypeError( + "GetLocalNodeCollection requires a NodeCollection in order to run" + ) rank = Rank() num_procs = NumProcesses() first_in_nc = nc[0].global_id first_index = ((rank - first_in_nc % num_procs) + num_procs) % num_procs if first_index <= len(nc): - return nc[first_index:len(nc):num_procs] + return nc[first_index : len(nc) : num_procs] else: return NodeCollection([]) diff --git a/pynest/nest/lib/_hl_api_parallel_computing.py b/pynest/nest/lib/_hl_api_parallel_computing.py index a519171380..608268292c 100644 --- a/pynest/nest/lib/_hl_api_parallel_computing.py +++ b/pynest/nest/lib/_hl_api_parallel_computing.py @@ -23,17 +23,17 @@ Functions for parallel computing """ -from ..ll_api import * +from .._ll_api import * from .. import pynestkernel as kernel from .. import nestkernel_api as nestkernel __all__ = [ - 'NumProcesses', - 'Rank', - 'GetLocalVPs', - 'SetAcceptableLatency', - 'SetMaxBuffered', - 'SyncProcesses', + "NumProcesses", + "Rank", + "GetLocalVPs", + "SetAcceptableLatency", + "SetMaxBuffered", + "SyncProcesses", ] @@ -109,16 +109,14 @@ def SetMaxBuffered(port_name, size): @check_stack def SyncProcesses(): - """Synchronize all MPI processes. - """ + """Synchronize all MPI processes.""" sr("SyncProcesses") @check_stack def GetLocalVPs(): - """Return iterable representing the VPs local to the MPI rank. - """ + """Return iterable representing the VPs local to the MPI rank.""" # Compute local VPs as range based on round-robin logic in # VPManager::get_vp(). mpitest_get_local_vps ensures this is in diff --git a/pynest/nest/lib/_hl_api_simulation.py b/pynest/nest/lib/_hl_api_simulation.py index 59fc043c7f..c75a4ce10e 100644 --- a/pynest/nest/lib/_hl_api_simulation.py +++ b/pynest/nest/lib/_hl_api_simulation.py @@ -29,22 +29,22 @@ from .. import pynestkernel as kernel from .. import nestkernel_api as nestkernel -from ..ll_api import * +from .._ll_api import * from ._hl_api_helper import is_iterable from ._hl_api_parallel_computing import Rank __all__ = [ - 'Cleanup', - 'DisableStructuralPlasticity', - 'EnableStructuralPlasticity', - 'GetKernelStatus', - 'Install', - 'Prepare', - 'ResetKernel', - 'Run', - 'RunManager', - 'SetKernelStatus', - 'Simulate', + "Cleanup", + "DisableStructuralPlasticity", + "EnableStructuralPlasticity", + "GetKernelStatus", + "Install", + "Prepare", + "ResetKernel", + "Run", + "RunManager", + "SetKernelStatus", + "Simulate", ] @@ -188,7 +188,7 @@ def ResetKernel(): are reset. The only exception is that dynamically loaded modules are not unloaded. This may change in a future version of NEST. - """ + """ nestkernel.llapi_reset_kernel() @@ -215,25 +215,28 @@ def SetKernelStatus(params): # _kernel_attr_names and _readonly_kernel_attrs. As hl_api_simulation is # imported during nest module initialization, we can't put the import on # the module level, but have to have it on the function level. 
- import nest # noqa + import nest # noqa + # TODO-PYNEST-NG: Enable again when KernelAttribute works - raise_errors = params.get('dict_miss_is_error', nest.dict_miss_is_error) + raise_errors = params.get("dict_miss_is_error", nest.dict_miss_is_error) valids = nest._kernel_attr_names readonly = nest._readonly_kernel_attrs keys = list(params.keys()) for key in keys: msg = None if key not in valids: - msg = f'`{key}` is not a valid kernel parameter, ' + \ - 'valid parameters are: ' + \ - ', '.join(f"'{p}'" for p in sorted(valids)) + msg = ( + f"`{key}` is not a valid kernel parameter, " + + "valid parameters are: " + + ", ".join(f"'{p}'" for p in sorted(valids)) + ) elif key in readonly: - msg = f'`{key}` is a readonly kernel parameter' + msg = f"`{key}` is a readonly kernel parameter" if msg is not None: if raise_errors: raise ValueError(msg) else: - warnings.warn(msg + f' \n`{key}` has been ignored') + warnings.warn(msg + f" \n`{key}` has been ignored") del params[key] nestkernel.llapi_set_kernel_status(params) @@ -327,7 +330,7 @@ def EnableStructuralPlasticity(): """ - sr('EnableStructuralPlasticity') + sr("EnableStructuralPlasticity") @check_stack @@ -339,4 +342,4 @@ def DisableStructuralPlasticity(): EnableStructuralPlasticity """ - sr('DisableStructuralPlasticity') + sr("DisableStructuralPlasticity") diff --git a/pynest/nest/lib/_hl_api_types.py b/pynest/nest/lib/_hl_api_types.py index 89d832f838..c4c2cab999 100644 --- a/pynest/nest/lib/_hl_api_types.py +++ b/pynest/nest/lib/_hl_api_types.py @@ -23,7 +23,7 @@ Classes defining the different PyNEST types """ -from ..ll_api import * +from .._ll_api import * from .. import pynestkernel as kernel from .. import nestkernel_api as nestkernel from ._hl_api_helper import ( diff --git a/pynest/nest/plot/__init__.py b/pynest/nest/plot/__init__.py index e69de29bb2..9ac49d3f85 100644 --- a/pynest/nest/plot/__init__.py +++ b/pynest/nest/plot/__init__.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# +# __init__.py +# +# This file is part of NEST. +# +# Copyright (C) 2004 The NEST Initiative +# +# NEST is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 2 of the License, or +# (at your option) any later version. +# +# NEST is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with NEST. If not, see . + +from ._visualization import plot_network +from ._raster_plot import extract_events, raster_plot +from ._voltage_trace import from_file, from_device + +__all__ = ["extract_events", "raster_plot", "from_device", "from_file", "plot_network"] diff --git a/pynest/nest/plot/_raster_plot.py b/pynest/nest/plot/_raster_plot.py index 4fbb0cdf1e..0264025f0b 100644 --- a/pynest/nest/plot/_raster_plot.py +++ b/pynest/nest/plot/_raster_plot.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# raster_plot.py +# _raster_plot.py # # This file is part of NEST. # @@ -20,17 +20,15 @@ # along with NEST. If not, see . 
""" Functions for raster plotting.""" - import nest -import numpy +import numpy as _np +import numpy.typing as _npt +import functools as _functools +import typing as _typing __all__ = [ - 'extract_events', - 'from_data', - 'from_device', - 'from_file', - 'from_file_numpy', - 'from_file_pandas' + "extract_events", + "raster_plot", ] @@ -77,10 +75,25 @@ def extract_events(data, time=None, sel=None): if not sel or node_id in sel: val.append(v) - return numpy.array(val) + return _np.array(val) + +@_functools.singledispatch +def raster_plot( + data: _typing.Any = None, + /, + files=None, + **kwargs, +): + raise TypeError("The first argument must be either a filename or data") -def from_data(data, sel=None, **kwargs): + +@raster_plot.register +def _raster_plot_from_data( + data: _npt.NDArray, + sel=None, + **kwargs, +): """Plot raster plot from data array. Parameters @@ -97,7 +110,7 @@ def from_data(data, sel=None, **kwargs): Parameters passed to _make_plot """ if len(data) == 0: - raise nest.kernel.NESTError("No data to plot.") + raise Exception("No data to plot.") ts = data[:, 1] d = extract_events(data, sel=sel) ts1 = d[:, 1] @@ -106,12 +119,13 @@ def from_data(data, sel=None, **kwargs): return _make_plot(ts, ts1, node_ids, data[:, 0], **kwargs) -def from_file(fname, **kwargs): +@raster_plot.register +def _raster_plot_from_file(fnames: _typing.Union[str, _typing.Iterable[str]], **kwargs): """Plot raster from file. Parameters ---------- - fname : str or tuple(str) or list(str) + fnames : str or tuple(str) or list(str) File name or list of file names If a list of files is given, the data from them is concatenated as if @@ -120,22 +134,20 @@ def from_file(fname, **kwargs): kwargs: Parameters passed to _make_plot """ - if isinstance(fname, str): - fname = [fname] - - if isinstance(fname, (list, tuple)): - try: - global pandas - pandas = __import__('pandas') - from_file_pandas(fname, **kwargs) - except ImportError: - from_file_numpy(fname, **kwargs) + if isinstance(fnames, str): + fnames = [fnames] + try: + import pandas + except ImportError: + _from_file_numpy(fnames, **kwargs) else: - print('fname should be one of str/list(str)/tuple(str).') + _from_file_pandas(fnames, **kwargs) -def from_file_pandas(fname, **kwargs): +def _from_file_pandas(fname, **kwargs): """Use pandas.""" + import pandas + data = None for f in fname: dataFrame = pandas.read_table(f, header=2, skipinitialspace=True) @@ -144,26 +156,26 @@ def from_file_pandas(fname, **kwargs): if data is None: data = newdata else: - data = numpy.concatenate((data, newdata)) + data = _np.concatenate((data, newdata)) - return from_data(data, **kwargs) + return _raster_plot_from_data(data, **kwargs) -def from_file_numpy(fname, **kwargs): +def _from_file_numpy(fname, **kwargs): """Use numpy.""" data = None for f in fname: - newdata = numpy.loadtxt(f, skiprows=3) + newdata = _np.loadtxt(f, skiprows=3) if data is None: data = newdata else: - data = numpy.concatenate((data, newdata)) + data = _np.concatenate((data, newdata)) - return from_data(data, **kwargs) + return _raster_plot_from_data(data, **kwargs) -def from_device(detec, **kwargs): +def _raster_plot_from_device(detec, **kwargs): """ Plot raster from a spike recorder. 
@@ -179,11 +191,11 @@

     nest.kernel.NESTError
     """
-    type_id = nest.GetDefaults(detec.get('model'), 'type_id')
+    type_id = nest.GetDefaults(detec.get("model"), "type_id")
     if not type_id == "spike_recorder":
         raise nest.kernel.NESTError("Please provide a spike_recorder.")

-    if detec.get('record_to') == "memory":
+    if detec.get("record_to") == "memory":

         ts, node_ids = _from_memory(detec)

@@ -191,9 +203,9 @@
             raise nest.kernel.NESTError("No events recorded!")

         if "title" not in kwargs:
-            kwargs["title"] = "Raster plot from device '%i'" % detec.get('global_id')
+            kwargs["title"] = "Raster plot from device '%i'" % detec.get("global_id")

-        if detec.get('time_in_steps'):
+        if detec.get("time_in_steps"):
             xlabel = "Steps"
         else:
             xlabel = "Time (ms)"

@@ -205,8 +217,10 @@

-        return from_file(fname, **kwargs)
+        return _raster_plot_from_file(fname, **kwargs)
     else:
-        raise nest.kernel.NESTError("No data to plot. Make sure that \
-            record_to is set to either 'ascii' or 'memory'.")
+        raise nest.kernel.NESTError(
+            "No data to plot. Make sure that \
+            record_to is set to either 'ascii' or 'memory'."
+        )


 def _from_memory(detec):
@@ -214,8 +228,17 @@ def _from_memory(detec):
     return ev["times"], ev["senders"]


-def _make_plot(ts, ts1, node_ids, neurons, hist=True, hist_binwidth=5.0,
-               grayscale=False, title=None, xlabel=None):
+def _make_plot(
+    ts,
+    ts1,
+    node_ids,
+    neurons,
+    hist=True,
+    hist_binwidth=5.0,
+    grayscale=False,
+    title=None,
+    xlabel=None,
+):
     """Generic plotting routine.

     Constructs a raster plot along with an optional histogram (common part in
@@ -268,20 +291,15 @@ def _make_plot(ts, ts1, node_ids, neurons, hist=True, hist_binwidth=5.0,
         xlim = plt.xlim()

         plt.axes([0.1, 0.1, 0.85, 0.17])
-        t_bins = numpy.arange(
-            numpy.amin(ts), numpy.amax(ts),
-            float(hist_binwidth)
-        )
+        t_bins = _np.arange(_np.amin(ts), _np.amax(ts), float(hist_binwidth))
         n, _ = _histogram(ts, bins=t_bins)
-        num_neurons = len(numpy.unique(neurons))
+        num_neurons = len(_np.unique(neurons))
         heights = 1000 * n / (hist_binwidth * num_neurons)

-        plt.bar(t_bins, heights, width=hist_binwidth, color=color_bar,
-                edgecolor=color_edge)
-        plt.yticks([
-            int(x) for x in
-            numpy.linspace(0.0, int(max(heights) * 1.1) + 5, 4)
-        ])
+        plt.bar(
+            t_bins, heights, width=hist_binwidth, color=color_bar, edgecolor=color_edge
+        )
+        plt.yticks([int(x) for x in _np.linspace(0.0, int(max(heights) * 1.1) + 5, 4)])
         plt.ylabel("Rate (Hz)")
         plt.xlabel(xlabel)
         plt.xlim(xlim)
@@ -344,7 +362,7 @@ def _histogram(a, bins=10, bin_range=None, normed=False):
         block = 65536
         n = sort(a[:block]).searchsorted(bins)
         for i in range(block, a.size, block):
-            n += sort(a[i:i + block]).searchsorted(bins)
+            n += sort(a[i : i + block]).searchsorted(bins)
         n = concatenate([n, [len(a)]])
         n = n[1:] - n[:-1]

diff --git a/pynest/nest/plot/_visualization.py b/pynest/nest/plot/_visualization.py
index 66eccd42e6..5e2df8bcb5 100644
--- a/pynest/nest/plot/_visualization.py
+++ b/pynest/nest/plot/_visualization.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 #
-# visualization.py
+# _visualization.py
 #
 # This file is part of NEST.
 #
@@ -26,13 +26,8 @@
 import pydot

 import nest

-__all__ = [
-    'plot_network',
-]
-

-def plot_network(nodes, filename, ext_conns=False,
-                 plot_modelnames=False):
+def plot_network(nodes, filename, ext_conns=False, plot_modelnames=False):
     """Plot the given nodes and the connections that originate from them.
@@ -65,13 +60,13 @@ def plot_network(nodes, filename, ext_conns=False, raise nest.kernel.NESTError("nodes must be a NodeCollection") if ext_conns: - raise NotImplementedError('ext_conns') + raise NotImplementedError("ext_conns") if plot_modelnames: - raise NotImplementedError('plot_modelnames') + raise NotImplementedError("plot_modelnames") conns = nest.GetConnections(nodes) - graph = pydot.Dot(rankdir='LR', ranksep='5') + graph = pydot.Dot(rankdir="LR", ranksep="5") for source, target in zip(conns.sources(), conns.targets()): graph.add_edge(pydot.Edge(str(source), str(target))) diff --git a/pynest/nest/plot/_voltage_trace.py b/pynest/nest/plot/_voltage_trace.py index dd6d93bc30..5c3eb7c802 100644 --- a/pynest/nest/plot/_voltage_trace.py +++ b/pynest/nest/plot/_voltage_trace.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# voltage_trace.py +# _voltage_trace.py # # This file is part of NEST. # @@ -31,7 +31,6 @@ "from_file", ] - def from_file(fname, title=None, grayscale=False): """Plot voltage trace from file. diff --git a/testsuite/pytests/test_tsodyks2_synapse.py b/testsuite/pytests/test_tsodyks2_synapse.py index 29a3c73808..9d1e94a709 100644 --- a/testsuite/pytests/test_tsodyks2_synapse.py +++ b/testsuite/pytests/test_tsodyks2_synapse.py @@ -128,7 +128,9 @@ def reproduce_weight_drift(self, _pre_spikes, absolute_weight=1.0): if time_in_simulation_steps in pre_spikes_forced_to_grid: # A presynaptic spike occurred now. # Adjusting the current time to make it exact. - t_spike = _pre_spikes[pre_spikes_forced_to_grid.index(time_in_simulation_steps)] + t_spike = _pre_spikes[ + pre_spikes_forced_to_grid.index(time_in_simulation_steps) + ] # Evaluating the depression rule. h = t_spike - t_lastspike diff --git a/testsuite/pytests/test_urbanczik_synapse.py b/testsuite/pytests/test_urbanczik_synapse.py index 053a4ec92a..825ab90cd1 100644 --- a/testsuite/pytests/test_urbanczik_synapse.py +++ b/testsuite/pytests/test_urbanczik_synapse.py @@ -40,7 +40,10 @@ def test_ConnectNeuronsWithUrbanczikSynapse(self): nest.set_verbosity(nest.verbosity.M_WARNING) - mc_models = ["iaf_cond_alpha_mc", "pp_cond_exp_mc_urbanczik"] # Multi-compartment models + mc_models = [ + "iaf_cond_alpha_mc", + "pp_cond_exp_mc_urbanczik", + ] # Multi-compartment models supported_models = ["pp_cond_exp_mc_urbanczik"] unsupported_models = [n for n in nest.node_models if n not in supported_models] @@ -231,7 +234,9 @@ def test_SynapseDepressionFacilitation(self): comparison between Nest and python implementation """ # extract the weight computed in python at the times of the presynaptic spikes - idx = np.nonzero(np.in1d(np.around(t, 4), np.around(pre_syn_spike_times + resolution, 4)))[0] + idx = np.nonzero( + np.in1d(np.around(t, 4), np.around(pre_syn_spike_times + resolution, 4)) + )[0] syn_w_comp_at_spike_times = syn_weight_comp[idx] realtive_error = (weights[-1] - syn_w_comp_at_spike_times[-1]) / (weights[-1] - init_w) self.assertTrue(abs(realtive_error) < 0.001) diff --git a/testsuite/pytests/test_visualization.py b/testsuite/pytests/test_visualization.py index 1896264060..5cd596fcbf 100644 --- a/testsuite/pytests/test_visualization.py +++ b/testsuite/pytests/test_visualization.py @@ -112,7 +112,7 @@ def test_voltage_trace_from_device(self): # Test with data from device plt.close("all") - nest.voltage_trace.from_device(device) + nest.plot.voltage_trace.from_device(device) self.voltage_trace_verify(device) # Test with data from file @@ -126,7 +126,7 @@ def test_voltage_trace_from_device(self): np.savetxt(filename, data) 
plt.close("all") - nest.voltage_trace.from_file(filename) + nest.plot._voltage_trace.from_file(filename) self.voltage_trace_verify(device) def spike_recorder_data_setup(self, to_file=False): @@ -159,14 +159,14 @@ def spike_recorder_raster_verify(self, sr_ref): @pytest.mark.skipif(not PLOTTING_POSSIBLE, reason="Plotting impossible because matplotlib or display missing") def test_raster_plot(self): """Test raster_plot""" - import nest.raster_plot + import nest.plot._raster_plot sr, sr_to_file = self.spike_recorder_data_setup(to_file=True) spikes = sr.get("events") sr_ref = spikes["times"] # Test from_device - nest.raster_plot.from_device(sr) + nest.plot.raster_plot.from_device(sr) self.spike_recorder_raster_verify(sr_ref) # Test from_data @@ -179,16 +179,16 @@ def test_raster_plot(self): # Test from_file filename = sr_to_file.filenames[0] self.filenames.append(filename) - nest.raster_plot.from_file(filename) + nest.plot.raster_plot.from_file(filename) self.spike_recorder_raster_verify(sr_ref) # Test from_file_numpy - nest.raster_plot.from_file_numpy([filename]) + nest.plot.raster_plot.from_file_numpy([filename]) self.spike_recorder_raster_verify(sr_ref) if HAVE_PANDAS: # Test from_file_pandas - nest.raster_plot.from_file_pandas([filename]) + nest.plot._raster_plot.from_file_pandas([filename]) self.spike_recorder_raster_verify(sr_ref) # Test extract_events From 86fb183758b6ece6a76b1053a3e43b6683b1ec27 Mon Sep 17 00:00:00 2001 From: Robin De Schepper Date: Fri, 2 Dec 2022 17:02:44 +0100 Subject: [PATCH 04/17] files that shouldn't cause problems --- .gitignore | 4 +- doc/userdoc/release_notes/v3.2/index.rst | 22 ++++++++ extras/wheelbuild/prepare_container.py | 61 ++++++++++++++++++++++ pyproject.toml | 47 +++++++++++++---- setup.py | 66 ++++++++++++++++++++++++ 5 files changed, 188 insertions(+), 12 deletions(-) create mode 100644 doc/userdoc/release_notes/v3.2/index.rst create mode 100644 extras/wheelbuild/prepare_container.py create mode 100644 setup.py diff --git a/.gitignore b/.gitignore index c0009a7b70..3da2fdd610 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,9 @@ conda/ _build/ install/ reports/ +_skbuild/ +*.egg-info/ +wheelhouse/ # compilation artefacts pynest/pynestkernel.cxx @@ -36,7 +39,6 @@ bin/nest-config bin/nest_vars.sh libnestutil/config.h nest/static_modules.h -pynest/setup.py # installation artefacts install_manifest.txt diff --git a/doc/userdoc/release_notes/v3.2/index.rst b/doc/userdoc/release_notes/v3.2/index.rst new file mode 100644 index 0000000000..3f975dba6b --- /dev/null +++ b/doc/userdoc/release_notes/v3.2/index.rst @@ -0,0 +1,22 @@ +All about NEST 3.2 +================== + +This page contains a summary of all breaking and non-breaking changes +from NEST 3.1 to NEST 3.2. In addition to the `auto-generated release +notes on GitHub `_, +this page also contains transition information that helps you to +update your simulation scripts when you come from an older version of +NEST. + +If you transition from a version earlier than 3.1, please see our +selection of earlier :doc:`transition guides `. + +.. contents:: On this page you'll find + :local: + :depth: 1 + +ConnPlotter +~~~~~~~~~~~ +All files related to ConnPlotter have been removed from NEST 3.2 and +moved to a separate repository `connplotter _`. 
\ No newline at end of file diff --git a/extras/wheelbuild/prepare_container.py b/extras/wheelbuild/prepare_container.py new file mode 100644 index 0000000000..444caa3f17 --- /dev/null +++ b/extras/wheelbuild/prepare_container.py @@ -0,0 +1,61 @@ +import subprocess +import os + +# This file exists as a Python script because running a Linux docker on Windows CI +# runners messes with the keyboard mapping of bash commands which affects symbols like +# -?/\ and makes writing commands impossible. Somehow, if we use .py files instead of +# .sh files, we can shell out from here with correct keyboard mapping. + +BOOST_VERSION = [1, 79, 0] +GSL_VERSION = [2, 7, 1] + + +def main(): + # Containers run multiple builds, so check if a previous build has installed the + # dependency already + if not os.path.exists("/boost"): + install_boost() + if not os.path.exists("/gsl"): + install_gsl() + + +def run_sequence(seq): + """Run a sequence of shell commands""" + for command in seq: + subprocess.run(command, shell=True, check=True) + + +def version(ver, delim="."): + """Format list of semver parts into a string""" + return delim.join(str(v) for v in ver) + + +def install_boost(): + """Download, unpack, and move Boost to `/boost`""" + boost_ver = version(BOOST_VERSION) + boost_ver_uscore = version(BOOST_VERSION, delim="_") + install_seq = ( + ( + "curl -L https://boostorg.jfrog.io/artifactory/main/release/" + + f"{boost_ver}/source/boost_{boost_ver_uscore}.tar.gz" + + " -o boost.tar.gz" + ), + "tar -xzf boost.tar.gz", + f"mv boost_{boost_ver_uscore} /boost", + ) + run_sequence(install_seq) + + +def install_gsl(): + """Download, unpack, configure and make install GSL to `/gsl`""" + gsl_ver = version(GSL_VERSION) + install_seq = ( + f"curl -L https://mirror.ibcp.fr/pub/gnu/gsl/gsl-{gsl_ver}.tar.gz -o gsl.tar.gz", + "tar -xzf gsl.tar.gz", + "mkdir /gsl", + f"cd gsl-{gsl_ver} && ./configure --prefix=/gsl && make && make install", + ) + run_sequence(install_seq) + + +main() diff --git a/pyproject.toml b/pyproject.toml index 05ffbca6fa..87b3f3774e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,11 +1,36 @@ -[tool.pytest.ini_options] -markers = [ - "skipif_missing_gsl: skip test if NEST was built without GSL support", - "skipif_missing_hdf5: skip test if NEST was built without HDF5 support", - "skipif_missing_mpi: skip test if NEST was built without MPI support", - "skipif_missing_threads: skip test if NEST was built without multithreading support", - "simulation: the simulation class to use. Always pass a 2nd dummy argument" -] - -[tool.black] -line-length = 120 +[build-system] +requires = [ + "wheel", + "scikit-build", + "cmake", + "Cython", + "ninja; platform_system!='Windows'" +] +build-backend = "setuptools.build_meta" + +[tool.cibuildwheel] +skip = ["*-musllinux_*", "*cp36*", "*cp37*", "*pp3*"] +archs = "auto64" +build-verbosity = 3 + +[tool.cibuildwheel.environment] +NEST_CMAKE_BUILDWHEEL="ON" +NEST_INSTALL_NODOC=true +CMAKE_ARGS="-DCMAKE_MODULE_PATH=/project/cmake" +BOOST_ROOT="/boost" +GSL_ROOT_DIR="/gsl" + +[tool.cibuildwheel.linux] +before-build = "python3 extras/wheelbuild/prepare_container.py" + +[tool.pytest.ini_options] +markers = [ + "skipif_missing_gsl: skip test if NEST was built without GSL support", + "skipif_missing_hdf5: skip test if NEST was built without HDF5 support", + "skipif_missing_mpi: skip test if NEST was built without MPI support", + "skipif_missing_threads: skip test if NEST was built without multithreading support", + "simulation: the simulation class to use. 
Always pass a 2nd dummy argument" +] + +[tool.black] +line-length = 120 diff --git a/setup.py b/setup.py new file mode 100644 index 0000000000..ca68a15eac --- /dev/null +++ b/setup.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- +# +# setup.py +# +# This file is part of NEST. +# +# Copyright (C) 2004 The NEST Initiative +# +# NEST is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 2 of the License, or +# (at your option) any later version. +# +# NEST is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with NEST. If not, see . + +from setuptools import find_packages +from skbuild import setup + +setup( + name="nest-simulator", + version="3.4.0-dev0", + description="Python bindings for NEST", + author="The NEST Initiative", + url="https://www.nest-simulator.org", + license="GPLv2+", + packages=find_packages(where="pynest"), + package_dir={"": "pynest"}, + install_requires=["numpy", "scipy"], + extras_require={"test": ["junitparser", "matplotlib", "nose"]}, + classifiers=[ + "Development Status :: 6 - Mature", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Intended Audience :: Science/Research", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + ], + python_requires=">=3.8, <4", + keywords=( + ",".join( + [ + "nest", + "simulator", + "neuroscience", + "neural", + "neuron", + "network", + "ai", + "spike", + "spiking", + ] + ) + ), + project_urls={ + "Homepage": "https://www.nest-simulator.org/", + "Bug Reports": "https://github.com/nest/nest-simulator/issues", + "Source": "https://github.com/nest/nest-simulator", + "Documentation": "https://nest-simulator.readthedocs.io/", + }, +) From db68aac3b638051fb1f30109e6198a456d375f6c Mon Sep 17 00:00:00 2001 From: Robin De Schepper Date: Fri, 2 Dec 2022 17:13:36 +0100 Subject: [PATCH 05/17] removed setup.py.in configure --- CMakeLists.txt | 5 ----- 1 file changed, 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c1b1db0eaf..e8bddc192a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -322,11 +322,6 @@ configure_file( "${PROJECT_BINARY_DIR}/libnestutil/config.h" @ONLY ) -configure_file( - "${PROJECT_SOURCE_DIR}/pynest/setup.py.in" - "${PROJECT_BINARY_DIR}/pynest/setup.py" @ONLY -) - configure_file( "${PROJECT_SOURCE_DIR}/bin/nest-config.in" "${PROJECT_BINARY_DIR}/bin/nest-config" @ONLY From bbeec7c0577a0903affca5342b77e0e81284628f Mon Sep 17 00:00:00 2001 From: Robin De Schepper Date: Fri, 2 Dec 2022 17:15:11 +0100 Subject: [PATCH 06/17] more uncontested files? 
--- cmake/CheckExtraCompilerFeatures.cmake | 6 ++++-- cmake/ProcessOptions.cmake | 14 +++++++++++--- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/cmake/CheckExtraCompilerFeatures.cmake b/cmake/CheckExtraCompilerFeatures.cmake index 079a6ea133..fc48cc7578 100644 --- a/cmake/CheckExtraCompilerFeatures.cmake +++ b/cmake/CheckExtraCompilerFeatures.cmake @@ -42,6 +42,7 @@ function( NEST_CHECK_EXITCODE_ABORT ) RESULT_VARIABLE RETURN_VALUE ERROR_QUIET OUTPUT_QUIET ) + set( RETURN_VALUE 255 ) if ( NOT RETURN_VALUE EQUAL 0 ) set( ABORT_ERR ${RETURN_VALUE} ) endif () @@ -50,7 +51,7 @@ function( NEST_CHECK_EXITCODE_ABORT ) endif () endif () printInfo( "Check the abort exitcode. ${ABORT_ERR}" ) - set( NEST_EXITCODE_ABORT ${ABORT_ERR} PARENT_SCOPE ) + set( NEST_EXITCODE_ABORT 255 PARENT_SCOPE ) endfunction() ####### NEST_EXITCODE_SEGFAULT ######## @@ -70,6 +71,7 @@ function( NEST_CHECK_EXITCODE_SEGFAULT ) RESULT_VARIABLE RETURN_VALUE ERROR_QUIET OUTPUT_QUIET ) + set( SEG_ERR 255 ) if ( NOT RETURN_VALUE EQUAL 0 ) set( SEG_ERR ${RETURN_VALUE} ) endif () @@ -78,7 +80,7 @@ function( NEST_CHECK_EXITCODE_SEGFAULT ) endif () endif () printInfo( "Check the segmentation fault exitcode. ${SEG_ERR}" ) - set( NEST_EXITCODE_SEGFAULT ${SEG_ERR} PARENT_SCOPE ) + set( NEST_EXITCODE_SEGFAULT 255 PARENT_SCOPE ) endfunction() ####### HAVE_CMATH_MAKROS_IGNORED ######## diff --git a/cmake/ProcessOptions.cmake b/cmake/ProcessOptions.cmake index 52674ec853..9674484009 100644 --- a/cmake/ProcessOptions.cmake +++ b/cmake/ProcessOptions.cmake @@ -200,14 +200,21 @@ function( NEST_PROCESS_STATIC_LIBRARIES ) "@loader_path/../../../nest" PARENT_SCOPE ) else () + message( STATUS "Looking for libs in install prefix: ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}/nest") + message( STATUS "Looking for libs in relfolder: \$ORIGIN/../../${CMAKE_INSTALL_LIBDIR}/nest") + set( CMAKE_INSTALL_RPATH # for binaries "\$ORIGIN/../${CMAKE_INSTALL_LIBDIR}/nest" # for libraries (except pynestkernel) "\$ORIGIN/../../${CMAKE_INSTALL_LIBDIR}/nest" - # for pynestkernel: origin at /lib(64)/python3.x/site-packages/nest - # while libs are at the root of that at /lib(64)/nest + # for wheel pynestkernel: origin at /lib/python3.x/site-packages/nest + # On the target machine, the libs are installed in these 2 locations: "\$ORIGIN/../../../nest" + "\$ORIGIN/../nest_simulator.libs" + # During wheel building, the libs are found here: + "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}/nest" + "\$ORIGIN/../../${CMAKE_INSTALL_LIBDIR}/nest" PARENT_SCOPE ) endif () @@ -353,7 +360,8 @@ function( NEST_PROCESS_WITH_PYTHON ) # Find Python set( HAVE_PYTHON OFF PARENT_SCOPE ) - if ( ${with-python} STREQUAL "ON" ) + # Localize the Python interpreter and lib/header files + find_package( Python 3.8 REQUIRED Interpreter Development.Module ) # Localize the Python interpreter and ABI find_package( Python 3.8 QUIET COMPONENTS Interpreter Development.Module ) From 30196d9ed371c222ade79475d7db838e4cec84b5 Mon Sep 17 00:00:00 2001 From: Robin De Schepper Date: Fri, 2 Dec 2022 17:24:20 +0100 Subject: [PATCH 07/17] fixed control flow --- cmake/ProcessOptions.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/ProcessOptions.cmake b/cmake/ProcessOptions.cmake index 9674484009..b8954b2303 100644 --- a/cmake/ProcessOptions.cmake +++ b/cmake/ProcessOptions.cmake @@ -359,7 +359,7 @@ endfunction() function( NEST_PROCESS_WITH_PYTHON ) # Find Python set( HAVE_PYTHON OFF PARENT_SCOPE ) - + if ( ${with-python} STREQUAL "ON") # Localize the Python 
interpreter and lib/header files find_package( Python 3.8 REQUIRED Interpreter Development.Module ) From 7dd176fe6f1189e2967ccd8ed3ad503fa2b44a6f Mon Sep 17 00:00:00 2001 From: Robin De Schepper Date: Fri, 2 Dec 2022 17:36:14 +0100 Subject: [PATCH 08/17] reintroduced all? --- cmake/FindCython.cmake | 110 ++++---- cmake/UseCython.cmake | 603 ++++++++++++++++++++++------------------- pynest/CMakeLists.txt | 22 +- 3 files changed, 405 insertions(+), 330 deletions(-) diff --git a/cmake/FindCython.cmake b/cmake/FindCython.cmake index 682e443eb1..c8de131125 100644 --- a/cmake/FindCython.cmake +++ b/cmake/FindCython.cmake @@ -1,11 +1,23 @@ -# Find the Cython compiler. +#.rst: # -# This code sets the following variables: +# Find ``cython`` executable. # -# CYTHON_EXECUTABLE +# This module will set the following variables in your project: +# +# ``CYTHON_EXECUTABLE`` +# path to the ``cython`` program +# +# ``CYTHON_VERSION`` +# version of ``cython`` +# +# ``CYTHON_FOUND`` +# true if the program was found +# +# For more information on the Cython project, see https://cython.org/. +# +# *Cython is a language that makes writing C extensions for the Python language +# as easy as Python itself.* # -# See also UseCython.cmake - #============================================================================= # Copyright 2011 Kitware, Inc. # @@ -22,51 +34,55 @@ # limitations under the License. #============================================================================= -# Modifications copyright (C) 2004 The NEST Initiative - -# Using the Cython executable that lives next to the Python executable +# Use the Cython executable that lives next to the Python executable # if it is a local installation. -if ( Python_FOUND ) - get_filename_component( _python_path ${Python_EXECUTABLE} PATH ) - find_program( CYTHON_EXECUTABLE - NAMES cython cython.bat cython3 - HINTS ${_python_path} - ) -else () - find_program( CYTHON_EXECUTABLE - NAMES cython cython.bat cython3 - ) -endif () +if(Python_EXECUTABLE) + get_filename_component(_python_path ${Python_EXECUTABLE} PATH) +elseif(Python3_EXECUTABLE) + get_filename_component(_python_path ${Python3_EXECUTABLE} PATH) +elseif(DEFINED PYTHON_EXECUTABLE) + get_filename_component(_python_path ${PYTHON_EXECUTABLE} PATH) +endif() + +if(DEFINED _python_path) + find_program(CYTHON_EXECUTABLE + NAMES cython cython.bat cython3 + HINTS ${_python_path} + DOC "path to the cython executable") +else() + find_program(CYTHON_EXECUTABLE + NAMES cython cython.bat cython3 + DOC "path to the cython executable") +endif() + +if(CYTHON_EXECUTABLE) + set(CYTHON_version_command ${CYTHON_EXECUTABLE} --version) + + execute_process(COMMAND ${CYTHON_version_command} + OUTPUT_VARIABLE CYTHON_version_output + ERROR_VARIABLE CYTHON_version_error + RESULT_VARIABLE CYTHON_version_result + OUTPUT_STRIP_TRAILING_WHITESPACE + ERROR_STRIP_TRAILING_WHITESPACE) -if ( NOT CYTHON_EXECUTABLE STREQUAL "CYTHON_EXECUTABLE-NOTFOUND" ) - execute_process( - COMMAND ${CYTHON_EXECUTABLE} --version - RESULT_VARIABLE RESULT - OUTPUT_VARIABLE CYTHON_VAR_OUTPUT - ERROR_VARIABLE CYTHON_ERR_OUTPUT - OUTPUT_STRIP_TRAILING_WHITESPACE - ) - if ( RESULT EQUAL 0 ) - if ( "${CYTHON_VAR_OUTPUT}" STREQUAL "" ) - # In cython v0.29.3 the version string is written to stderr and not to stdout, as one would expect. 
- set( CYTHON_VAR_OUTPUT "${CYTHON_ERR_OUTPUT}" )
+    set(_error_msg "Command \"${CYTHON_version_command}\" failed with")
+    set(_error_msg "${_error_msg} output:\n${CYTHON_version_error}")
+    message(SEND_ERROR "${_error_msg}")
+  else()
+    if("${CYTHON_version_output}" MATCHES "^[Cc]ython version ([^,]+)")
+      set(CYTHON_VERSION "${CMAKE_MATCH_1}")
+    else()
+      if("${CYTHON_version_error}" MATCHES "^[Cc]ython version ([^,]+)")
+        set(CYTHON_VERSION "${CMAKE_MATCH_1}")
+      endif()
     endif()
-    string( REGEX REPLACE ".* ([0-9]+\\.[0-9]+(\\.[0-9]+)?).*" "\\1"
-        CYTHON_VERSION "${CYTHON_VAR_OUTPUT}" )
-  else ()
-    printError( "Cython error: ${CYTHON_ERR_OUTPUT}\nat ${CYTHON_EXECUTABLE}")
-  endif ()
+  endif()
+endif()

-endif ()
+include(FindPackageHandleStandardArgs)
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(Cython REQUIRED_VARS CYTHON_EXECUTABLE)

-include( FindPackageHandleStandardArgs )
-find_package_handle_standard_args( Cython
-  FOUND_VAR
-  CYTHON_FOUND
-  REQUIRED_VARS
-  CYTHON_EXECUTABLE
-  VERSION_VAR
-  CYTHON_VERSION
-  )
+mark_as_advanced(CYTHON_EXECUTABLE)

-mark_as_advanced( CYTHON_EXECUTABLE )
+include(UseCython)
diff --git a/cmake/UseCython.cmake b/cmake/UseCython.cmake
index e29d447ea7..5edf839cbf 100644
--- a/cmake/UseCython.cmake
+++ b/cmake/UseCython.cmake
@@ -1,51 +1,83 @@
-# Define a function to create Cython modules.
+#.rst:
 #
-# For more information on the Cython project, see http://cython.org/.
-# "Cython is a language that makes writing C extensions for the Python language
-# as easy as Python itself."
+# The following functions are defined:
 #
-# This file defines a CMake function to build a Cython Python module.
-# To use it, first include this file.
+# .. cmake:command:: add_cython_target
 #
-#   include( UseCython )
+# Create a custom rule to generate the source code for a Python extension module
+# using cython.
 #
-# Then call cython_add_module to create a module.
+#   add_cython_target(<Name> [<CythonInput>]
+#                     [EMBED_MAIN]
+#                     [C | CXX]
+#                     [PY2 | PY3]
+#                     [OUTPUT_VAR <OutputVar>])
 #
-#   cython_add_module( <name> <src1> <src2> ... <srcN> )
+# ``<Name>`` is the name of the new target, and ``<CythonInput>``
+# is the path to a cython source file. Note that, despite the name, no new
+# targets are created by this function. Instead, see ``OUTPUT_VAR`` for
+# retrieving the path to the generated source for subsequent targets.
 #
-# To create a standalone executable, the function
+# If only ``<Name>`` is provided, and it ends in the ".pyx" extension, then it
+# is assumed to be the ``<CythonInput>``. The name of the input without the
+# extension is used as the target name. If only ``<Name>`` is provided, and it
+# does not end in the ".pyx" extension, then the ``<CythonInput>`` is assumed to
+# be ``<Name>.pyx``.
 #
-#   cython_add_standalone_executable( <name> [MAIN_MODULE src1] ... )
+# The Cython include search path is amended with any entries found in the
+# ``INCLUDE_DIRECTORIES`` property of the directory containing the
+# ``<CythonInput>`` file. Use ``include_directories`` to add to the Cython
+# include search path.
 #
-# To avoid dependence on Python, set the PYTHON_LIBRARY cache variable to point
-# to a static library. If a MAIN_MODULE source is specified,
-# the "if __name__ == '__main__':" from that module is used as the C main() method
-# for the executable. If MAIN_MODULE, the source with the same basename as <name>
-# is assumed to be the MAIN_MODULE.
+# Options:
 #
-# Where <module_name> is the name of the resulting Python module and
-# <src1> <src2> ... are source files to be compiled into the module, e.g. *.pyx,
-# *.py, *.c, *.cxx, etc. A CMake target is created with name <module_name>. This can
-# be used for target_link_libraries(), etc.
+# ``EMBED_MAIN``
+# Embed a main() function in the generated output (for stand-alone
+# applications that initialize their own Python runtime).
 #
-# The sample paths set with the CMake include_directories() command will be used
-# for include directories to search for *.pxd when running the Cython complire.
+# ``C | CXX``
+# Force the generation of either a C or C++ file. By default, a C file is
+# generated, unless the C language is not enabled for the project; in this
+# case, a C++ file is generated by default.
 #
-# Cache variables that effect the behavior include:
+# ``PY2 | PY3``
+# Force compilation using either Python-2 or Python-3 syntax and code
+# semantics. By default, Python-2 syntax and semantics are used if the major
+# version of Python found is 2. Otherwise, Python-3 syntax and semantics are
+# used.
 #
-#   CYTHON_ANNOTATE
-#   CYTHON_NO_DOCSTRINGS
-#   CYTHON_FLAGS
+# ``OUTPUT_VAR <OutputVar>``
+# Set the variable ``<OutputVar>`` in the parent scope to the path to the
+# generated source file. By default, ``<Name>`` is used as the output
+# variable name.
 #
-# Source file properties that effect the build process are
+# Defined variables:
 #
-#   CYTHON_IS_CXX
+# ``<OutputVar>``
+# The path of the generated source file.
 #
-# If this is set of a *.pyx file with CMake set_source_files_properties()
-# command, the file will be compiled as a C++ file.
+# Cache variables that affect the behavior include:
+#
+# ``CYTHON_ANNOTATE``
+# Whether to create an annotated .html file when compiling.
+#
+# ``CYTHON_FLAGS``
+# Additional flags to pass to the Cython compiler.
+#
+# Example usage
+# ^^^^^^^^^^^^^
+#
+# .. code-block:: cmake
+#
+#   find_package(Cython)
+#
+#   # Note: In this case, either one of these arguments may be omitted; their
+#   # value would have been inferred from that of the other.
+#   add_cython_target(cy_code cy_code.pyx)
+#
+#   add_library(cy_code MODULE ${cy_code})
+#   target_link_libraries(cy_code ...)
 #
-# See also FindCython.cmake
-
 #=============================================================================
 # Copyright 2011 Kitware, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
 #     http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #=============================================================================

-# Modifications copyright (C) 2004 The NEST Initiative
-
 # Configuration options.
-set( CYTHON_ANNOTATE OFF
-  CACHE BOOL "Create an annotated .html file when compiling *.pyx." )
-set( CYTHON_NO_DOCSTRINGS OFF
-  CACHE BOOL "Strip docstrings from the compiled module." )
-set( CYTHON_FLAGS "" CACHE STRING
-  "Extra flags to the cython compiler." )
-mark_as_advanced( CYTHON_ANNOTATE CYTHON_NO_DOCSTRINGS CYTHON_FLAGS )
-
-# The following function is vendored from the deprecated file FindPythonLibs.cmake.
-# PYTHON_ADD_MODULE(<name> src1 src2 ... srcN) is used to build modules for python.
-# PYTHON_WRITE_MODULES_HEADER() writes a header file you can include -# in your sources to initialize the static python modules -function(PYTHON_ADD_MODULE _NAME ) - get_property(_TARGET_SUPPORTS_SHARED_LIBS - GLOBAL PROPERTY TARGET_SUPPORTS_SHARED_LIBS) - option(PYTHON_ENABLE_MODULE_${_NAME} "Add module ${_NAME}" TRUE) - option(PYTHON_MODULE_${_NAME}_BUILD_SHARED - "Add module ${_NAME} shared" ${_TARGET_SUPPORTS_SHARED_LIBS}) - - # Mark these options as advanced - mark_as_advanced(PYTHON_ENABLE_MODULE_${_NAME} - PYTHON_MODULE_${_NAME}_BUILD_SHARED) - - if(PYTHON_ENABLE_MODULE_${_NAME}) - if(PYTHON_MODULE_${_NAME}_BUILD_SHARED) - set(PY_MODULE_TYPE MODULE) +set(CYTHON_ANNOTATE OFF + CACHE BOOL "Create an annotated .html file when compiling *.pyx.") + +set(CYTHON_FLAGS "" CACHE STRING + "Extra flags to the cython compiler.") +mark_as_advanced(CYTHON_ANNOTATE CYTHON_FLAGS) + +set(CYTHON_CXX_EXTENSION "cxx") +set(CYTHON_C_EXTENSION "c") + +get_property(languages GLOBAL PROPERTY ENABLED_LANGUAGES) + +function(add_cython_target _name) + set(options EMBED_MAIN C CXX PY2 PY3) + set(options1 OUTPUT_VAR) + cmake_parse_arguments(_args "${options}" "${options1}" "" ${ARGN}) + + list(GET _args_UNPARSED_ARGUMENTS 0 _arg0) + + # if provided, use _arg0 as the input file path + if(_arg0) + set(_source_file ${_arg0}) + + # otherwise, must determine source file from name, or vice versa + else() + get_filename_component(_name_ext "${_name}" EXT) + + # if extension provided, _name is the source file + if(_name_ext) + set(_source_file ${_name}) + get_filename_component(_name "${_source_file}" NAME_WE) + + # otherwise, assume the source file is ${_name}.pyx else() - set(PY_MODULE_TYPE STATIC) - set_property(GLOBAL APPEND PROPERTY PY_STATIC_MODULES_LIST ${_NAME}) + set(_source_file ${_name}.pyx) endif() + endif() - set_property(GLOBAL APPEND PROPERTY PY_MODULES_LIST ${_NAME}) - add_library(${_NAME} ${PY_MODULE_TYPE} ${ARGN}) -# target_link_libraries(${_NAME} ${PYTHON_LIBRARIES}) + set(_embed_main FALSE) - if(PYTHON_MODULE_${_NAME}_BUILD_SHARED) - set_target_properties(${_NAME} PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}") - if(WIN32 AND NOT CYGWIN) - set_target_properties(${_NAME} PROPERTIES SUFFIX ".pyd") - endif() - endif() + if("C" IN_LIST languages) + set(_output_syntax "C") + elseif("CXX" IN_LIST languages) + set(_output_syntax "CXX") + else() + message(FATAL_ERROR "Either C or CXX must be enabled to use Cython") + endif() + if(_args_EMBED_MAIN) + set(_embed_main TRUE) endif() -endfunction() -set( CYTHON_CXX_EXTENSION "cxx" ) -set( CYTHON_C_EXTENSION "c" ) - -# Create a *.c or *.cxx file from a *.pyx file. -# Input the generated file basename. The generate file will put into the variable -# placed in the "generated_file" argument. Finally all the *.py and *.pyx files. -function( compile_pyx _name generated_file ) - # Default to assuming all files are C. - set( cxx_arg "" ) - set( extension ${CYTHON_C_EXTENSION} ) - set( pyx_lang "C" ) - set( comment "Compiling Cython C source for ${_name}..." ) - - set( cython_include_directories "" ) - set( pxd_dependencies "" ) - set( c_header_dependencies "" ) - set( pyx_locations "" ) - - foreach ( pyx_file ${ARGN} ) - get_filename_component( pyx_file_basename "${pyx_file}" NAME_WE ) - - # Determine if it is a C or C++ file. 
- get_source_file_property( property_is_cxx ${pyx_file} CYTHON_IS_CXX ) - if ( ${property_is_cxx} ) - set( cxx_arg "--cplus" ) - set( extension ${CYTHON_CXX_EXTENSION} ) - set( pyx_lang "CXX" ) - set( comment "Compiling Cython CXX source for ${_name}..." ) - endif () - - # Get the include directories. - get_source_file_property( pyx_location ${pyx_file} LOCATION ) - get_filename_component( pyx_path ${pyx_location} PATH ) - get_directory_property( cmake_include_directories DIRECTORY ${pyx_path} INCLUDE_DIRECTORIES ) - list( APPEND cython_include_directories ${cmake_include_directories} ) - list( APPEND pyx_locations "${pyx_location}" ) - - # Determine dependencies. - # Add the pxd file will the same name as the given pyx file. - unset( corresponding_pxd_file CACHE ) - find_file( corresponding_pxd_file ${pyx_file_basename}.pxd - PATHS "${pyx_path}" ${cmake_include_directories} - NO_DEFAULT_PATH ) - if ( corresponding_pxd_file ) - list( APPEND pxd_dependencies "${corresponding_pxd_file}" ) - endif () - - # pxd files to check for additional dependencies. - set( pxds_to_check "${pyx_file}" "${pxd_dependencies}" ) - set( pxds_checked "" ) - set( number_pxds_to_check 1 ) - while ( ${number_pxds_to_check} GREATER 0 ) - foreach ( pxd ${pxds_to_check} ) - list( APPEND pxds_checked "${pxd}" ) - list( REMOVE_ITEM pxds_to_check "${pxd}" ) - - # check for C header dependencies - file( STRINGS "${pxd}" extern_from_statements - REGEX "cdef[ ]+extern[ ]+from.*$" ) - foreach ( statement ${extern_from_statements} ) - # Had trouble getting the quote in the regex - string( REGEX REPLACE "cdef[ ]+extern[ ]+from[ ]+[\"]([^\"]+)[\"].*" "\\1" header "${statement}" ) - unset( header_location CACHE ) - find_file( header_location ${header} PATHS ${cmake_include_directories} ) - if ( header_location ) - list( FIND c_header_dependencies "${header_location}" header_idx ) - if ( ${header_idx} LESS 0 ) - list( APPEND c_header_dependencies "${header_location}" ) - endif () - endif () - endforeach () - - # check for pxd dependencies - - # Look for cimport statements. - set( module_dependencies "" ) - file( STRINGS "${pxd}" cimport_statements REGEX cimport ) - foreach ( statement ${cimport_statements} ) - if ( ${statement} MATCHES from ) - string( REGEX REPLACE "from[ ]+([^ ]+).*" "\\1" module "${statement}" ) - else () - string( REGEX REPLACE "cimport[ ]+([^ ]+).*" "\\1" module "${statement}" ) - endif () - list( APPEND module_dependencies ${module} ) - endforeach () - list( REMOVE_DUPLICATES module_dependencies ) - # Add the module to the files to check, if appropriate. - foreach ( module ${module_dependencies} ) - unset( pxd_location CACHE ) - find_file( pxd_location ${module}.pxd - PATHS "${pyx_path}" ${cmake_include_directories} NO_DEFAULT_PATH ) - if ( pxd_location ) - list( FIND pxds_checked ${pxd_location} pxd_idx ) - if ( ${pxd_idx} LESS 0 ) - list( FIND pxds_to_check ${pxd_location} pxd_idx ) - if ( ${pxd_idx} LESS 0 ) - list( APPEND pxds_to_check ${pxd_location} ) - list( APPEND pxd_dependencies ${pxd_location} ) - endif () # if it is not already going to be checked - endif () # if it has not already been checked - endif () # if pxd file can be found - endforeach () # for each module dependency discovered - endforeach () # for each pxd file to check - list( LENGTH pxds_to_check number_pxds_to_check ) - endwhile () - endforeach () # pyx_file + if(_args_C) + set(_output_syntax "C") + endif() - # Set additional flags. 
- if ( CYTHON_ANNOTATE ) - set( annotate_arg "--annotate" ) - endif () + if(_args_CXX) + set(_output_syntax "CXX") + endif() + + # Doesn't select an input syntax - Cython + # defaults to 2 for Cython 2 and 3 for Cython 3 + set(_input_syntax "default") + + if(_args_PY2) + set(_input_syntax "PY2") + endif() + + if(_args_PY3) + set(_input_syntax "PY3") + endif() - if ( CYTHON_NO_DOCSTRINGS ) - set( no_docstrings_arg "--no-docstrings" ) - endif () + set(embed_arg "") + if(_embed_main) + set(embed_arg "--embed") + endif() + + set(cxx_arg "") + set(extension "c") + if(_output_syntax STREQUAL "CXX") + set(cxx_arg "--cplus") + set(extension "cxx") + endif() - if ( "${CMAKE_BUILD_TYPE}" STREQUAL "Debug" OR - "${CMAKE_BUILD_TYPE}" STREQUAL "RelWithDebInfo" ) - set( cython_debug_arg "--gdb" ) - endif () + set(py_version_arg "") + if(_input_syntax STREQUAL "PY2") + set(py_version_arg "-2") + elseif(_input_syntax STREQUAL "PY3") + set(py_version_arg "-3") + endif() - # Set version to 3 for Python 3 - set( version_arg "-3" ) + set(generated_file "${CMAKE_CURRENT_BINARY_DIR}/${_name}.${extension}") + set_source_files_properties(${generated_file} PROPERTIES GENERATED TRUE) + + set(_output_var ${_name}) + if(_args_OUTPUT_VAR) + set(_output_var ${_args_OUTPUT_VAR}) + endif() + set(${_output_var} ${generated_file} PARENT_SCOPE) + + file(RELATIVE_PATH generated_file_relative + ${CMAKE_BINARY_DIR} ${generated_file}) + + set(comment "Generating ${_output_syntax} source ${generated_file_relative}") + set(cython_include_directories "") + set(pxd_dependencies "") + set(c_header_dependencies "") + + # Get the include directories. + get_source_file_property(pyx_location ${_source_file} LOCATION) + get_filename_component(pyx_path ${pyx_location} PATH) + get_directory_property(cmake_include_directories + DIRECTORY ${pyx_path} + INCLUDE_DIRECTORIES) + list(APPEND cython_include_directories ${cmake_include_directories}) + + # Determine dependencies. + # Add the pxd file with the same basename as the given pyx file. + get_filename_component(pyx_file_basename ${_source_file} NAME_WE) + unset(corresponding_pxd_file CACHE) + find_file(corresponding_pxd_file ${pyx_file_basename}.pxd + PATHS "${pyx_path}" ${cmake_include_directories} + NO_DEFAULT_PATH) + if(corresponding_pxd_file) + list(APPEND pxd_dependencies "${corresponding_pxd_file}") + endif() + + # pxd files to check for additional dependencies + set(pxds_to_check "${_source_file}" "${pxd_dependencies}") + set(pxds_checked "") + set(number_pxds_to_check 1) + while(number_pxds_to_check GREATER 0) + foreach(pxd ${pxds_to_check}) + list(APPEND pxds_checked "${pxd}") + list(REMOVE_ITEM pxds_to_check "${pxd}") + + # look for C headers + file(STRINGS "${pxd}" extern_from_statements + REGEX "cdef[ ]+extern[ ]+from.*$") + foreach(statement ${extern_from_statements}) + # Had trouble getting the quote in the regex + string(REGEX REPLACE + "cdef[ ]+extern[ ]+from[ ]+[\"]([^\"]+)[\"].*" "\\1" + header "${statement}") + unset(header_location CACHE) + find_file(header_location ${header} PATHS ${cmake_include_directories}) + if(header_location) + list(FIND c_header_dependencies "${header_location}" header_idx) + if(${header_idx} LESS 0) + list(APPEND c_header_dependencies "${header_location}") + endif() + endif() + endforeach() + + # check for pxd dependencies + # Look for cimport statements. 
+ set(module_dependencies "") + file(STRINGS "${pxd}" cimport_statements REGEX cimport) + foreach(statement ${cimport_statements}) + if(${statement} MATCHES from) + string(REGEX REPLACE + "from[ ]+([^ ]+).*" "\\1" + module "${statement}") + else() + string(REGEX REPLACE + "cimport[ ]+([^ ]+).*" "\\1" + module "${statement}") + endif() + list(APPEND module_dependencies ${module}) + endforeach() + + # check for pxi dependencies + # Look for include statements. + set(include_dependencies "") + file(STRINGS "${pxd}" include_statements REGEX include) + foreach(statement ${include_statements}) + string(REGEX REPLACE + "include[ ]+[\"]([^\"]+)[\"].*" "\\1" + module "${statement}") + list(APPEND include_dependencies ${module}) + endforeach() + + list(REMOVE_DUPLICATES module_dependencies) + list(REMOVE_DUPLICATES include_dependencies) + + # Add modules to the files to check, if appropriate. + foreach(module ${module_dependencies}) + unset(pxd_location CACHE) + find_file(pxd_location ${module}.pxd + PATHS "${pyx_path}" ${cmake_include_directories} + NO_DEFAULT_PATH) + if(pxd_location) + list(FIND pxds_checked ${pxd_location} pxd_idx) + if(${pxd_idx} LESS 0) + list(FIND pxds_to_check ${pxd_location} pxd_idx) + if(${pxd_idx} LESS 0) + list(APPEND pxds_to_check ${pxd_location}) + list(APPEND pxd_dependencies ${pxd_location}) + endif() # if it is not already going to be checked + endif() # if it has not already been checked + endif() # if pxd file can be found + endforeach() # for each module dependency discovered + + # Add includes to the files to check, if appropriate. + foreach(_include ${include_dependencies}) + unset(pxi_location CACHE) + find_file(pxi_location ${_include} + PATHS "${pyx_path}" ${cmake_include_directories} + NO_DEFAULT_PATH) + if(pxi_location) + list(FIND pxds_checked ${pxi_location} pxd_idx) + if(${pxd_idx} LESS 0) + list(FIND pxds_to_check ${pxi_location} pxd_idx) + if(${pxd_idx} LESS 0) + list(APPEND pxds_to_check ${pxi_location}) + list(APPEND pxd_dependencies ${pxi_location}) + endif() # if it is not already going to be checked + endif() # if it has not already been checked + endif() # if include file can be found + endforeach() # for each include dependency discovered + endforeach() # for each include file to check + + list(LENGTH pxds_to_check number_pxds_to_check) + endwhile() + + # Set additional flags. + set(annotate_arg "") + if(CYTHON_ANNOTATE) + set(annotate_arg "--annotate") + endif() + + set(cython_debug_arg "") + set(line_directives_arg "") + if(CMAKE_BUILD_TYPE STREQUAL "Debug" OR + CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo") + set(cython_debug_arg "--gdb") + set(line_directives_arg "--line-directives") + endif() # Include directory arguments. - list( REMOVE_DUPLICATES cython_include_directories ) - set( include_directory_arg "" ) - foreach ( _include_dir ${cython_include_directories} ) - set( include_directory_arg ${include_directory_arg} "-I" "${_include_dir}" ) - endforeach () + list(REMOVE_DUPLICATES cython_include_directories) + set(include_directory_arg "") + foreach(_include_dir ${cython_include_directories}) + set(include_directory_arg + ${include_directory_arg} "--include-dir" "${_include_dir}") + endforeach() - # Determining generated file name. 
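
As a reading aid, the pxd/header scan above (in both the old compile_pyx version and the new add_cython_target version) boils down to two regular expressions applied line by line. Below is a minimal Python sketch of the same logic; like the CMake regexes, it assumes single-line `cdef extern from` and `cimport` statements, and it is illustrative only, not part of the build code:

    import re

    def scan_cython_dependencies(path):
        """Collect header and cimport dependencies the way the CMake scan does."""
        headers, cimports = [], []
        with open(path) as source:
            for line in source:
                # 'cdef extern from "header.h"' -> C header dependency
                extern = re.search(r'cdef\s+extern\s+from\s+"([^"]+)"', line)
                if extern:
                    headers.append(extern.group(1))
                elif "cimport" in line:
                    # 'from mod cimport x' or 'cimport mod' -> pxd dependency
                    words = line.split()
                    if words and words[0] == "from":
                        cimports.append(words[1])
                    elif words and words[0] == "cimport":
                        cimports.append(words[1])
        return headers, cimports
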
- set( _generated_file "${CMAKE_CURRENT_BINARY_DIR}/${_name}.${extension}" ) - set_source_files_properties( ${_generated_file} PROPERTIES GENERATED TRUE ) - set( ${generated_file} ${_generated_file} PARENT_SCOPE ) + list(REMOVE_DUPLICATES pxd_dependencies) + list(REMOVE_DUPLICATES c_header_dependencies) - list( REMOVE_DUPLICATES pxd_dependencies ) - list( REMOVE_DUPLICATES c_header_dependencies ) + string(REGEX REPLACE " " ";" CYTHON_FLAGS_LIST "${CYTHON_FLAGS}") # Add the command to run the compiler. - add_custom_command( OUTPUT ${_generated_file} - COMMAND ${CYTHON_EXECUTABLE} - ARGS ${cxx_arg} ${include_directory_arg} ${version_arg} - ${annotate_arg} ${no_docstrings_arg} ${cython_debug_arg} ${CYTHON_FLAGS} - --output-file ${_generated_file} ${pyx_locations} - DEPENDS ${pyx_locations} ${pxd_dependencies} - IMPLICIT_DEPENDS ${pyx_lang} ${c_header_dependencies} - COMMENT ${comment} - ) + add_custom_command(OUTPUT ${generated_file} + COMMAND ${CYTHON_EXECUTABLE} + ARGS ${cxx_arg} ${include_directory_arg} ${py_version_arg} + ${embed_arg} ${annotate_arg} ${cython_debug_arg} + ${line_directives_arg} ${CYTHON_FLAGS_LIST} ${pyx_location} + --output-file ${generated_file} + DEPENDS ${_source_file} + ${pxd_dependencies} + IMPLICIT_DEPENDS ${_output_syntax} + ${c_header_dependencies} + COMMENT ${comment}) + + # NOTE(opadron): I thought about making a proper target, but after trying it + # out, I decided that it would be far too convenient to use the same name as + # the target for the extension module (e.g.: for single-file modules): + # + # ... + # add_cython_target(_module.pyx) + # add_library(_module ${_module}) + # ... + # + # The above example would not be possible since the "_module" target name + # would already be taken by the cython target. Since I can't think of a + # reason why someone would need the custom target instead of just using the + # generated file directly, I decided to leave this commented out. + # + # add_custom_target(${_name} DEPENDS ${generated_file}) # Remove their visibility to the user. - set( corresponding_pxd_file "" CACHE INTERNAL "" ) - set( header_location "" CACHE INTERNAL "" ) - set( pxd_location "" CACHE INTERNAL "" ) -endfunction() - -# cython_add_module( src1 src2 ... srcN ) -# Build the Cython Python module. -function( cython_add_module _name ) - set( pyx_module_sources "" ) - set( other_module_sources "" ) - foreach ( _file ${ARGN} ) - if ( ${_file} MATCHES ".*\\.py[x]?$" ) - list( APPEND pyx_module_sources ${_file} ) - else () - list( APPEND other_module_sources ${_file} ) - endif () - endforeach () - compile_pyx( ${_name} generated_file ${pyx_module_sources} ) - include_directories( ${Python_INCLUDE_DIRS} ) - python_add_module( ${_name} ${generated_file} ${other_module_sources} ) - if ( APPLE ) - set_target_properties( ${_name} PROPERTIES LINK_FLAGS "-undefined dynamic_lookup" ) - else () - target_link_libraries( ${_name} ${Python_LIBRARIES} ) - endif () -endfunction() - -include( CMakeParseArguments ) -# cython_add_standalone_executable( _name [MAIN_MODULE src3.py] src1 src2 ... srcN ) -# Creates a standalone executable the given sources. 
-function( cython_add_standalone_executable _name ) - set( pyx_module_sources "" ) - set( other_module_sources "" ) - set( main_module "" ) - cmake_parse_arguments( cython_arguments "" "MAIN_MODULE" "" ${ARGN} ) - include_directories( ${Python_INCLUDE_DIRS} ) - foreach ( _file ${cython_arguments_UNPARSED_ARGUMENTS} ) - if ( ${_file} MATCHES ".*\\.py[x]?$" ) - get_filename_component( _file_we ${_file} NAME_WE ) - if ( "${_file_we}" STREQUAL "${_name}" ) - set( main_module "${_file}" ) - elseif ( NOT "${_file}" STREQUAL "${cython_arguments_MAIN_MODULE}" ) - set( PYTHON_MODULE_${_file_we}_static_BUILD_SHARED OFF ) - compile_pyx( "${_file_we}_static" generated_file "${_file}" ) - list( APPEND pyx_module_sources "${generated_file}" ) - endif () - else () - list( APPEND other_module_sources ${_file} ) - endif () - endforeach () - - if ( cython_arguments_MAIN_MODULE ) - set( main_module ${cython_arguments_MAIN_MODULE} ) - endif () - if ( NOT main_module ) - printError( "main module not found." ) - endif () - get_filename_component( main_module_we "${main_module}" NAME_WE ) - set( CYTHON_FLAGS ${CYTHON_FLAGS} --embed ) - compile_pyx( "${main_module_we}_static" generated_file ${main_module} ) - add_executable( ${_name} ${generated_file} ${pyx_module_sources} ${other_module_sources} ) - target_link_libraries( ${_name} ${Python_LIBRARIES} ${pyx_module_libs} ) + set(corresponding_pxd_file "" CACHE INTERNAL "") + set(header_location "" CACHE INTERNAL "") + set(pxd_location "" CACHE INTERNAL "") endfunction() diff --git a/pynest/CMakeLists.txt b/pynest/CMakeLists.txt index 8f2b696c7d..73d4291797 100644 --- a/pynest/CMakeLists.txt +++ b/pynest/CMakeLists.txt @@ -17,14 +17,18 @@ # You should have received a copy of the GNU General Public License # along with NEST. If not, see . -if ( HAVE_PYTHON ) +find_package(Cython) +if ( HAVE_PYTHON ) if ( CYTHON_FOUND ) - include( UseCython ) - set_source_files_properties( nestkernel_api.pyx PROPERTIES CYTHON_IS_CXX TRUE ) - cython_add_module( nestkernel_api nestkernel_api.pyx ) + add_cython_target ( nestkernel_api nestkernel_api.pyx CXX PY3) + add_library(nestkernel_api MODULE ${nestkernel_api}) + set_target_properties(nestkernel_api PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}") else () - message( FATAL_ERROR "Building PyNEST requires Cython." ) + message( + FATAL_ERROR + "Building the PyNEST Python bindings requires Cython." + ) endif () # TODO PYNEST NG: Add models, once the refacoring of the module system is done. @@ -48,10 +52,8 @@ if ( HAVE_PYTHON ) -D_IS_PYNEST ) - install(DIRECTORY nest/ ${PROJECT_BINARY_DIR}/pynest/nest/ - DESTINATION ${CMAKE_INSTALL_PREFIX}/${PYEXECDIR}/nest - ) - - install( TARGETS nestkernel_api DESTINATION ${PYEXECDIR}/nest/ ) + # Install the PyNEST libraries into the PyNEST build folder, for Python packagers + # to pick up. 
+ install( TARGETS nestkernel_api DESTINATION pynest/nest ) endif () From 3941a624757fbb70f1562ed614e1e32aa124d48b Mon Sep 17 00:00:00 2001 From: Robin De Schepper Date: Thu, 14 Sep 2023 22:24:54 +0200 Subject: [PATCH 09/17] fix merge --- .../prepare_wheel_container.py | 7 + pynest/nest/__init__.py | 4 +- pynest/nest/_ll_api.py | 211 --- pynest/nest/lib/_hl_api_connection_helpers.py | 367 ---- pynest/nest/lib/_hl_api_connections.py | 396 ---- pynest/nest/lib/_hl_api_exceptions.py | 220 --- pynest/nest/lib/_hl_api_helper.py | 582 ------ pynest/nest/lib/_hl_api_info.py | 211 --- pynest/nest/lib/_hl_api_models.py | 198 -- pynest/nest/lib/_hl_api_nodes.py | 213 --- pynest/nest/lib/_hl_api_parallel_computing.py | 125 -- pynest/nest/lib/_hl_api_simulation.py | 345 ---- pynest/nest/lib/_hl_api_spatial.py | 1607 ----------------- pynest/nest/lib/_hl_api_types.py | 1221 ------------- pynest/nest/lib/hl_api_connection_helpers.py | 104 +- pynest/nest/lib/hl_api_connections.py | 96 +- pynest/nest/lib/hl_api_exceptions.py | 132 +- pynest/nest/lib/hl_api_helper.py | 216 ++- pynest/nest/lib/hl_api_info.py | 109 +- pynest/nest/lib/hl_api_models.py | 13 +- pynest/nest/lib/hl_api_nodes.py | 54 +- pynest/nest/lib/hl_api_parallel_computing.py | 38 +- pynest/nest/lib/hl_api_simulation.py | 49 +- pynest/nest/lib/hl_api_sonata.py | 666 ------- pynest/nest/lib/hl_api_spatial.py | 371 ++-- pynest/nest/lib/hl_api_types.py | 392 ++-- pynest/nest/logic/_hl_api_logic.py | 54 - pynest/nest/logic/hl_api_logic.py | 12 +- pynest/nest/math/_hl_api_math.py | 146 -- pynest/nest/plot/_raster_plot.py | 373 ---- pynest/nest/plot/_visualization.py | 79 - pynest/nest/plot/_voltage_trace.py | 264 --- pynest/nest/random/_hl_api_random.py | 130 -- .../{hl_api_server.py => 2hl_api_server.py} | 0 .../{hl_api_spatial.py => hl_api_spatial2.py} | 0 .../_hl_api_spatial_distributions.py | 296 +-- ...ns.py => hl_api_spatial_distributions2.py} | 0 pynest/nestkernel_api.pyx | 2 +- pyproject.toml | 2 +- 39 files changed, 1019 insertions(+), 8286 deletions(-) rename extras/wheelbuild/prepare_container.py => build_support/prepare_wheel_container.py (90%) delete mode 100644 pynest/nest/_ll_api.py delete mode 100644 pynest/nest/lib/_hl_api_connection_helpers.py delete mode 100644 pynest/nest/lib/_hl_api_connections.py delete mode 100644 pynest/nest/lib/_hl_api_exceptions.py delete mode 100644 pynest/nest/lib/_hl_api_helper.py delete mode 100644 pynest/nest/lib/_hl_api_info.py delete mode 100644 pynest/nest/lib/_hl_api_models.py delete mode 100644 pynest/nest/lib/_hl_api_nodes.py delete mode 100644 pynest/nest/lib/_hl_api_parallel_computing.py delete mode 100644 pynest/nest/lib/_hl_api_simulation.py delete mode 100644 pynest/nest/lib/_hl_api_spatial.py delete mode 100644 pynest/nest/lib/_hl_api_types.py delete mode 100644 pynest/nest/lib/hl_api_sonata.py delete mode 100644 pynest/nest/logic/_hl_api_logic.py delete mode 100644 pynest/nest/math/_hl_api_math.py delete mode 100644 pynest/nest/plot/_raster_plot.py delete mode 100644 pynest/nest/plot/_visualization.py delete mode 100644 pynest/nest/plot/_voltage_trace.py delete mode 100644 pynest/nest/random/_hl_api_random.py rename pynest/nest/server/{hl_api_server.py => 2hl_api_server.py} (100%) rename pynest/nest/spatial/{hl_api_spatial.py => hl_api_spatial2.py} (100%) rename pynest/nest/spatial_distributions/{hl_api_spatial_distributions.py => hl_api_spatial_distributions2.py} (100%) diff --git a/extras/wheelbuild/prepare_container.py b/build_support/prepare_wheel_container.py similarity index 90% 
rename from extras/wheelbuild/prepare_container.py
rename to build_support/prepare_wheel_container.py
index 444caa3f17..b39e1b7178 100644
--- a/extras/wheelbuild/prepare_container.py
+++ b/build_support/prepare_wheel_container.py
@@ -57,5 +57,12 @@ def install_gsl():
     )
     run_sequence(install_seq)
 
+def install_omp():
+    """Use the yum package manager of CentOS to install OpenMP libraries"""
+    install_seq = (
+        "yum install libomp-devel",
+    )
+    run_sequence(install_seq)
+
 main()
diff --git a/pynest/nest/__init__.py b/pynest/nest/__init__.py
index 54999d19ca..80cba88aae 100644
--- a/pynest/nest/__init__.py
+++ b/pynest/nest/__init__.py
@@ -61,7 +61,7 @@
 import sys  # noqa
 import types  # noqa
 
-from ._ll_api import KernelAttribute  # noqa
+from .ll_api import KernelAttribute  # noqa
 
 try:
     import versionchecker  # noqa: F401
@@ -80,7 +80,7 @@ class NestModule(types.ModuleType):
     from . import math  # noqa
     from . import random  # noqa
     from . import spatial_distributions  # noqa
-    from ._ll_api import set_communicator
+    from .ll_api import set_communicator
 
     def __init__(self, name):
         super().__init__(name)
diff --git a/pynest/nest/_ll_api.py b/pynest/nest/_ll_api.py
deleted file mode 100644
index a3053d6a9b..0000000000
--- a/pynest/nest/_ll_api.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# _ll_api.py
-#
-# This file is part of NEST.
-#
-# Copyright (C) 2004 The NEST Initiative
-#
-# NEST is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or
-# (at your option) any later version.
-#
-# NEST is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with NEST. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-Low-level API of PyNEST Module
-"""
-
-# Since this is a low level module, we need some more trickery, thus:
-# pylint: disable=wrong-import-position
-
-import functools
-import inspect
-import keyword
-import os
-import sys
-
-# This is a workaround for readline import errors encountered with Anaconda
-# Python running on Ubuntu, when invoked from the terminal
-# "python -c 'import nest'"
-if "linux" in sys.platform and "Anaconda" in sys.version:
-    import readline  # noqa: F401
-
-# This is a workaround to avoid segmentation faults when importing
-# scipy *after* nest. See https://github.com/numpy/numpy/issues/2521
-try:
-    import scipy  # noqa: F401
-except ImportError:
-    pass
-
-# Make MPI-enabled NEST import properly. The underlying problem is that the
-# shared object pynestkernel dynamically opens other libraries that open
-# yet other libraries.
-sys.setdlopenflags(os.RTLD_NOW | os.RTLD_GLOBAL)
-
-from . import nestkernel_api as nestkernel  # noqa
-from .lib._hl_api_exceptions import NESTError, NESTErrors
-
-__all__ = [
-    "set_communicator",
-    # 'take_array_index',
-    "KernelAttribute",
-]
-
-
-initialized = False
-
-
-def set_communicator(comm):
-    """Set global communicator for NEST.
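
For context, `set_communicator` is meant to receive an mpi4py communicator before simulation. A minimal usage sketch, assuming an MPI-enabled build with mpi4py installed, follows; note that in this revision the actual engine call is still marked TODO in the function body:

    from mpi4py import MPI

    import nest

    # Raises NESTError if mpi4py has not been imported first.
    nest.set_communicator(MPI.COMM_WORLD)
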
- - Parameters - ---------- - comm: MPI.Comm from mpi4py - - Raises - ------ - nestkernel.NESTError - """ - - if "mpi4py" not in sys.modules: - raise NESTError("set_communicator: mpi4py not loaded.") - - # TODO-PYNEST-NG: set_communicator - # engine.set_communicator(comm) - - -class KernelAttribute: - """ - Descriptor that dispatches attribute access to the nest kernel. - """ - - def __init__(self, typehint, description, readonly=False, default=None, localonly=False): - self._readonly = readonly - self._localonly = localonly - self._default = default - - readonly = readonly and "**read only**" - localonly = localonly and "**local only**" - - self.__doc__ = ( - description - + ("." if default is None else f", defaults to ``{default}``.") - + ("\n\n" if readonly or localonly else "") - + ", ".join(c for c in (readonly, localonly) if c) - + f"\n\n:type: {typehint}" - ) - - def __set_name__(self, cls, name): - self._name = name - self._full_status = name == "kernel_status" - - def __get__(self, instance, cls=None): - if instance is None: - return self - - status_root = nestkernel.llapi_get_kernel_status() - - if self._full_status: - return status_root - else: - return status_root[self._name] - - def __set__(self, instance, value): - if self._readonly: - msg = f"`{self._name}` is a read only kernel attribute." - raise AttributeError(msg) - nestkernel.llapi_set_kernel_status({self._name: value}) - - -def init(argv): - """Initializes NEST. - - If the environment variable PYNEST_QUIET is set, NEST will not print - welcome text containing the version and other information. Likewise, - if the environment variable PYNEST_DEBUG is set, NEST starts in debug - mode. Note that the same effect can be achieved by using the - commandline arguments --quiet and --debug respectively. - - Parameters - ---------- - argv : list - Command line arguments, passed to the NEST kernel - - Raises - ------ - nestkernel.NESTError.PyNESTError - """ - - global initialized - - if initialized: - raise NESTErrors.PyNESTError("NEST already initialized.") - - # Some commandline arguments of NEST and Python have the same - # name, but different meaning. To avoid unintended behavior, we - # handle NEST's arguments here and pass it a modified copy, while - # we leave the original list unchanged for further use by the user - # or other modules. - nest_argv = argv[:] - - quiet = "--quiet" in nest_argv or "PYNEST_QUIET" in os.environ - if "--quiet" in nest_argv: - nest_argv.remove("--quiet") - if "--debug" in nest_argv: - nest_argv.remove("--debug") - if "--sli-debug" in nest_argv: - nest_argv.remove("--sli-debug") - nest_argv.append("--debug") - - if "PYNEST_DEBUG" in os.environ and "--debug" not in nest_argv: - nest_argv.append("--debug") - - path = os.path.dirname(__file__) - nestkernel.init(nest_argv) - - if not quiet: - print("NEST initialized successfully!") - - # Dirty hack to get tab-completion for models in IPython. 
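
To illustrate the `KernelAttribute` descriptor defined above: declared on the `NestModule` class, it routes plain attribute access on the `nest` module through the kernel status dictionary. A sketch, with `resolution` as an assumed example attribute (not taken from this patch):

    class NestModule(types.ModuleType):
        resolution = KernelAttribute("float", "The resolution of the simulation (in ms)", default=0.1)

    # Reading dispatches to the kernel:  nest.resolution
    #   -> nestkernel.llapi_get_kernel_status()["resolution"]
    # Writing dispatches likewise:       nest.resolution = 0.05
    #   -> nestkernel.llapi_set_kernel_status({"resolution": 0.05})
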
- try: - __IPYTHON__ - except NameError: - pass - else: - from .lib._hl_api_simulation import GetKernelStatus # noqa - - keyword_lists = ( - "connection_rules", - "node_models", - "recording_backends", - "rng_types", - "stimulation_backends", - "synapse_models", - ) - for kwl in keyword_lists: - keyword.kwlist += GetKernelStatus(kwl) - - else: - from .lib.hl_api_simulation import GetKernelStatus # noqa - - keyword_lists = ( - "connection_rules", - "node_models", - "recording_backends", - "rng_types", - "stimulation_backends", - "synapse_models", - ) - for kwl in keyword_lists: - keyword.kwlist += GetKernelStatus(kwl) - - -init(sys.argv) diff --git a/pynest/nest/lib/_hl_api_connection_helpers.py b/pynest/nest/lib/_hl_api_connection_helpers.py deleted file mode 100644 index c879f76d13..0000000000 --- a/pynest/nest/lib/_hl_api_connection_helpers.py +++ /dev/null @@ -1,367 +0,0 @@ -# -*- coding: utf-8 -*- -# -# _hl_api_connection_helpers.py -# -# This file is part of NEST. -# -# Copyright (C) 2004 The NEST Initiative -# -# NEST is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 2 of the License, or -# (at your option) any later version. -# -# NEST is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with NEST. If not, see . - -""" -These are helper functions to ease the definition of the -Connect function. -""" - -import copy -import numpy as np - -from .._ll_api import * -from .. import pynestkernel as kernel -from .. 
import nestkernel_api as nestkernel -from ._hl_api_types import CollocatedSynapses, Mask, NodeCollection, Parameter -from ._hl_api_exceptions import NESTErrors - -__all__ = [ - "_connect_layers_needed", - "_connect_spatial", - "_process_conn_spec", - "_process_spatial_projections", - "_process_syn_spec", -] - - -def _process_conn_spec(conn_spec): - """Processes the connectivity specifications from None, string or dictionary to a dictionary.""" - if conn_spec is None: - # Use default conn_spec - return {"rule": "all_to_all"} - elif isinstance(conn_spec, str): - processed_conn_spec = {"rule": conn_spec} - return processed_conn_spec - elif isinstance(conn_spec, dict): - return conn_spec - else: - raise TypeError("conn_spec must be a string or dict") - - -def _process_syn_spec(syn_spec, conn_spec, prelength, postlength, use_connect_arrays): - """Processes the synapse specifications from None, string or dictionary to a dictionary.""" - syn_spec = copy.copy(syn_spec) - - if syn_spec is None: - # for use_connect_arrays, return "static_synapse" by default - if use_connect_arrays: - return {"synapse_model": "static_synapse"} - return syn_spec - - if isinstance(syn_spec, CollocatedSynapses): - return syn_spec - - if isinstance(syn_spec, str): - return {"synapse_model": syn_spec} - - rule = conn_spec["rule"] - if isinstance(syn_spec, dict): - if "synapse_model" in syn_spec and not isinstance( - syn_spec["synapse_model"], str - ): - raise kernel.NESTError("'synapse_model' must be a string") - for key, value in syn_spec.items(): - # if value is a list, it is converted to a numpy array - if isinstance(value, (list, tuple)): - value = np.asarray(value) - - if isinstance(value, (np.ndarray, np.generic)): - if len(value.shape) == 1: - if rule == "one_to_one": - if value.shape[0] != prelength: - if use_connect_arrays: - raise kernel.NESTError( - "'{}' has to be an array of dimension {}.".format( - key, prelength - ) - ) - else: - raise kernel.NESTError( - "'{}' has to be an array of dimension {}, a scalar or a dictionary.".format( - key, prelength - ) - ) - else: - syn_spec[key] = value - elif rule == "fixed_total_number": - if "N" in conn_spec and value.shape[0] != conn_spec["N"]: - raise kernel.NESTError( - "'{}' has to be an array of dimension {}, a scalar or a dictionary".format( - key, conn_spec["N"] - ) - ) - else: - syn_spec[key] = value - else: - raise kernel.NESTError( - "'{}' has the wrong type. 
One-dimensional parameter arrays can only be used in " - "conjunction with rule 'one_to_one' or 'fixed_total_number'.".format( - key - ) - ) - - elif len(value.shape) == 2: - if rule == "all_to_all": - if value.shape[0] != postlength or value.shape[1] != prelength: - raise kernel.NESTError( - "'{}' has to be an array of dimension {}x{} (n_target x n_sources), a scalar " - "or a dictionary.".format(key, postlength, prelength) - ) - else: - syn_spec[key] = value.flatten() - elif rule == "fixed_indegree": - indegree = conn_spec["indegree"] - if value.shape[0] != postlength or value.shape[1] != indegree: - raise kernel.NESTError( - "'{}' has to be an array of dimension {}x{} (n_target x indegree), a scalar " - "or a dictionary.".format(key, postlength, indegree) - ) - else: - syn_spec[key] = value.flatten() - elif rule == "fixed_outdegree": - outdegree = conn_spec["outdegree"] - if value.shape[0] != prelength or value.shape[1] != outdegree: - raise kernel.NESTError( - "'{}' has to be an array of dimension {}x{} (n_sources x outdegree), a scalar " - "or a dictionary.".format(key, prelength, outdegree) - ) - else: - syn_spec[key] = value.flatten() - else: - raise kernel.NESTError( - "'{}' has the wrong type. Two-dimensional parameter arrays can only be used in " - "conjunction with rules 'all_to_all', 'fixed_indegree' or fixed_outdegree'.".format( - key - ) - ) - - # check that "synapse_model" is there for use_connect_arrays - if use_connect_arrays and "synapse_model" not in syn_spec: - syn_spec["synapse_model"] = "static_synapse" - - return syn_spec - - # If we get here, syn_spec is of illegal type. - raise TypeError("syn_spec must be a string, dict or CollocatedSynapses object") - - -def _process_spatial_projections(conn_spec, syn_spec): - """ - Processes the connection and synapse specifications to a single dictionary - for the SLI function `ConnectLayers`. 
- """ - allowed_conn_spec_keys = [ - "mask", - "allow_multapses", - "allow_autapses", - "rule", - "indegree", - "outdegree", - "p", - "use_on_source", - "allow_oversized_mask", - ] - allowed_syn_spec_keys = [ - "weight", - "delay", - "synapse_model", - "synapse_label", - "receptor_type", - ] - for key in conn_spec.keys(): - if key not in allowed_conn_spec_keys: - raise ValueError( - "'{}' is not allowed in conn_spec when connecting with mask or kernel".format( - key - ) - ) - - projections = {} - projections.update(conn_spec) - if "p" in conn_spec: - projections["kernel"] = projections.pop("p") - if syn_spec is not None: - if isinstance(syn_spec, CollocatedSynapses): - for syn_list in syn_spec.syn_specs: - for key in syn_list.keys(): - if key not in allowed_syn_spec_keys: - raise ValueError( - "'{}' is not allowed in syn_spec when connecting with mask or kernel".format( - key - ) - ) - projections.update({"synapse_parameters": syn_spec.syn_specs}) - else: - for key in syn_spec.keys(): - if key not in allowed_syn_spec_keys: - raise ValueError( - "'{}' is not allowed in syn_spec when connecting with mask or kernel".format( - key - ) - ) - projections.update(syn_spec) - if conn_spec["rule"] == "fixed_indegree": - if "use_on_source" in conn_spec: - raise ValueError( - "'use_on_source' can only be set when using pairwise_bernoulli" - ) - projections["connection_type"] = "pairwise_bernoulli_on_source" - projections["number_of_connections"] = projections.pop("indegree") - elif conn_spec["rule"] == "fixed_outdegree": - if "use_on_source" in conn_spec: - raise ValueError( - "'use_on_source' can only be set when using pairwise_bernoulli" - ) - projections["connection_type"] = "pairwise_bernoulli_on_target" - projections["number_of_connections"] = projections.pop("outdegree") - elif conn_spec["rule"] == "pairwise_bernoulli": - if "use_on_source" in conn_spec and conn_spec["use_on_source"]: - projections["connection_type"] = "pairwise_bernoulli_on_source" - projections.pop("use_on_source") - else: - projections["connection_type"] = "pairwise_bernoulli_on_target" - if "use_on_source" in projections: - projections.pop("use_on_source") - else: - raise kernel.NESTError( - "When using kernel or mask, the only possible connection rules are " - "'pairwise_bernoulli', 'fixed_indegree', or 'fixed_outdegree'" - ) - projections.pop("rule") - return projections - - -def _connect_layers_needed(conn_spec, syn_spec): - """Determins if connection has to be made with the SLI function `ConnectLayers`.""" - if isinstance(conn_spec, dict): - # If a conn_spec entry is based on spatial properties, we must use ConnectLayers. - for key, item in conn_spec.items(): - if isinstance(item, Parameter) and item.is_spatial(): - return True - # We must use ConnectLayers in some additional cases. - rule_is_bernoulli = "pairwise_bernoulli" in str(conn_spec["rule"]) - if ( - "mask" in conn_spec - or ("p" in conn_spec and not rule_is_bernoulli) - or "use_on_source" in conn_spec - ): - return True - # If a syn_spec entry is based on spatial properties, we must use ConnectLayers. - if isinstance(syn_spec, dict): - for key, item in syn_spec.items(): - if isinstance(item, Parameter) and item.is_spatial(): - return True - elif isinstance(syn_spec, CollocatedSynapses): - return any( - [ - _connect_layers_needed(conn_spec, syn_param) - for syn_param in syn_spec.syn_specs - ] - ) - # If we get here, there is not need to use ConnectLayers. 
- return False - - -def _connect_spatial(pre, post, projections): - """Connect `pre` to `post` using the specifications in `projections`.""" - - def fixdict(d): - for k, v in d.items(): - if isinstance(v, dict): - d[k] = fixdict(v) - elif isinstance(v, Mask) or isinstance(v, Parameter): - d[k] = v._datum - return d - - nestkernel.llapi_connect_layers(pre, post, fixdict(projections)) - - -def _process_input_nodes(pre, post, conn_spec): - """ - Check the properties of `pre` and `post` nodes: - - * If `conn_spec` is 'one_to_one', no uniqueness check is performed; the - "regular" one-to-one connect is used if both inputs are NodeCollection, - "connect_arrays" is used otherwise. - * If both `pre` and `post` are NodeCollections or can be converted to - NodeCollections (i.e. contain unique IDs), then proceed to "regular" - connect (potentially after conversion to NodeCollection). - * If both `pre` and `post` are arrays and contain non-unique items, then - we proceed to "connect_arrays". - * If at least one of them has non-unique items and they have different - sizes, then raise an error. - """ - use_connect_arrays = False - - # check for 'one_to_one' conn_spec - one_to_one_cspec = ( - conn_spec - if not isinstance(conn_spec, dict) - else conn_spec.get("rule", "all_to_all") == "one_to_one" - ) - - # check and convert input types - pre_is_nc, post_is_nc = True, True - - if not isinstance(pre, NodeCollection): - # skip uniqueness check for connect_arrays compatible `conn_spec` - if not one_to_one_cspec and len(set(pre)) == len(pre): - pre = NodeCollection(pre) - else: - pre_is_nc = False - - if not isinstance(post, NodeCollection): - # skip uniqueness check for connect_arrays compatible `conn_spec` - if not one_to_one_cspec and len(set(post)) == len(post): - post = NodeCollection(post) - else: - post_is_nc = False - - if not pre_is_nc or not post_is_nc: - if len(pre) != len(post): - raise NESTErrors.ArgumentType( - "Connect", - "If `pre` or `post` contain non-unique IDs, then they must have the same length.", - ) - - # convert to arrays - pre = np.asarray(pre) - post = np.asarray(post) - - # check array type - if not issubclass(pre.dtype.type, (int, np.integer)): - raise NESTErrors.ArgumentType("Connect", " `pre` IDs should be integers.") - - if not issubclass(post.dtype.type, (int, np.integer)): - raise NESTErrors.ArgumentType("Connect", " `post` IDs should be integers.") - - # check dimension - if not (pre.ndim == 1 and post.ndim == 1): - raise ValueError("Sources and targets must be 1-dimensional arrays") - - use_connect_arrays = True - - if use_connect_arrays and not one_to_one_cspec: - raise ValueError( - "When connecting two arrays with non-unique IDs, `conn_spec` must be 'one_to_one'." - ) - - return use_connect_arrays, pre, post diff --git a/pynest/nest/lib/_hl_api_connections.py b/pynest/nest/lib/_hl_api_connections.py deleted file mode 100644 index 17f7ba3909..0000000000 --- a/pynest/nest/lib/_hl_api_connections.py +++ /dev/null @@ -1,396 +0,0 @@ -# -*- coding: utf-8 -*- -# -# _hl_api_connections.py -# -# This file is part of NEST. -# -# Copyright (C) 2004 The NEST Initiative -# -# NEST is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 2 of the License, or -# (at your option) any later version. 
-# -# NEST is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with NEST. If not, see . - -""" -Functions for connection handling -""" - -import numpy - -from .._ll_api import connect_arrays -from .. import pynestkernel as kernel -from .. import nestkernel_api as nestkernel - -from ._hl_api_connection_helpers import ( - _process_input_nodes, - _connect_layers_needed, - _connect_spatial, - _process_conn_spec, - _process_spatial_projections, - _process_syn_spec, -) -from ._hl_api_nodes import Create -from ._hl_api_parallel_computing import NumProcesses -from ._hl_api_types import NodeCollection, SynapseCollection, Mask, Parameter - -__all__ = [ - "Connect", - "Disconnect", - "GetConnections", -] - - -def GetConnections(source=None, target=None, synapse_model=None, synapse_label=None): - """Return a `SynapseCollection` representing the connection identifiers. - - Any combination of `source`, `target`, `synapse_model` and - `synapse_label` parameters is permitted. - - Parameters - ---------- - source : NodeCollection, optional - Source node IDs, only connections from these - pre-synaptic neurons are returned - target : NodeCollection, optional - Target node IDs, only connections to these - postsynaptic neurons are returned - synapse_model : str, optional - Only connections with this synapse type are returned - synapse_label : int, optional - (non-negative) only connections with this synapse label are returned - - Returns - ------- - SynapseCollection: - Object representing the source-node_id, target-node_id, target-thread, synapse-id, port of connections, see - :py:class:`.SynapseCollection` for more. - - Raises - ------ - TypeError - - Notes - ----- - Only connections with targets on the MPI process executing - the command are returned. - """ - - params = {} - - if source is not None: - if isinstance(source, NodeCollection): - params["source"] = source - else: - raise TypeError("source must be NodeCollection.") - - if target is not None: - if isinstance(target, NodeCollection): - params["target"] = target - else: - raise TypeError("target must be NodeCollection.") - - if synapse_model is not None: - params["synapse_model"] = synapse_model - - if synapse_label is not None: - params["synapse_label"] = synapse_label - - conns = nestkernel.llapi_get_connections(params) - - return conns - - -def Connect(pre, post, conn_spec=None, syn_spec=None, return_synapsecollection=False): - """ - Connect `pre` nodes to `post` nodes. - - Nodes in `pre` and `post` are connected using the specified connectivity - (`all-to-all` by default) and synapse type (:cpp:class:`static_synapse ` by default). - Details depend on the connectivity rule. - - Lists of synapse models and connection rules are available as - ``nest.synapse_models`` and ``nest.connection_rules``, respectively. 
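
A minimal usage sketch for `GetConnections` as defined above (model and synapse names are standard NEST examples):

    import nest

    pre = nest.Create("iaf_psc_alpha", 5)
    post = nest.Create("iaf_psc_alpha", 5)
    nest.Connect(pre, post)

    # Any combination of the filters may be given; only connections with
    # targets on the local MPI process are returned.
    conns = nest.GetConnections(source=pre, synapse_model="static_synapse")
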
- - Parameters - ---------- - pre : NodeCollection (or array-like object) - Presynaptic nodes, as object representing the IDs of the nodes - post : NodeCollection (or array-like object) - Postsynaptic nodes, as object representing the IDs of the nodes - conn_spec : str or dict, optional - Specifies connectivity rule, see below - syn_spec : str or dict, optional - Specifies synapse model, see below - return_synapsecollection: bool - Specifies whether or not we should return a :py:class:`.SynapseCollection` of pre and post connections - - Raises - ------ - kernel.NESTError - - Notes - ----- - It is possible to connect NumPy arrays of node IDs one-to-one by passing the arrays as `pre` and `post`, - specifying `'one_to_one'` for `conn_spec`. - In that case, the arrays may contain non-unique IDs. - You may also specify weight, delay, and receptor type for each connection as NumPy arrays in the `syn_spec` - dictionary. - This feature is currently not available when MPI is used; trying to connect arrays with more than one - MPI process will raise an error. - - If pre and post have spatial positions, a `mask` can be specified as a dictionary. The mask define which - nodes are considered as potential targets for each source node. Connections with spatial nodes can also - use `nest.spatial_distributions` as parameters, for instance for the probability `p`. - - **Connectivity specification (conn_spec)** - - Available rules and associated parameters:: - - - 'all_to_all' (default) - - 'one_to_one' - - 'fixed_indegree', 'indegree' - - 'fixed_outdegree', 'outdegree' - - 'fixed_total_number', 'N' - - 'pairwise_bernoulli', 'p' - - 'symmetric_pairwise_bernoulli', 'p' - - See :ref:`conn_rules` for more details, including example usage. - - **Synapse specification (syn_spec)** - - The synapse model and its properties can be given either as a string - identifying a specific synapse model (default: :cpp:class:`static_synapse `) or - as a dictionary specifying the synapse model and its parameters. - - Available keys in the synapse specification dictionary are:: - - - 'synapse_model' - - 'weight' - - 'delay' - - 'receptor_type' - - any parameters specific to the selected synapse model. - - See :ref:`synapse_spec` for details, including example usage. - - All parameters are optional and if not specified, the default values - of the synapse model will be used. The key 'synapse_model' identifies the - synapse model, this can be one of NEST's built-in synapse models - or a user-defined model created via :py:func:`.CopyModel`. - - If `synapse_model` is not specified the default model :cpp:class:`static_synapse ` - will be used. - - Distributed parameters can be defined through NEST's different parametertypes. NEST has various - random parameters, spatial parameters and distributions (only accesseable for nodes with spatial positions), - logical expressions and mathematical expressions, which can be used to define node and connection parameters. - - To see all available parameters, see documentation defined in distributions, logic, math, - random and spatial modules. - - See Also - --------- - :ref:`connection_management` - """ - use_connect_arrays, pre, post = _process_input_nodes(pre, post, conn_spec) - - # Converting conn_spec to dict, without putting it on the SLI stack. - processed_conn_spec = _process_conn_spec(conn_spec) - # If syn_spec is given, its contents are checked, and if needed converted - # to the right formats. 
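
The rule and synapse dictionaries described in the docstring above, sketched with example values:

    import nest

    pre = nest.Create("iaf_psc_alpha", 10)
    post = nest.Create("iaf_psc_alpha", 10)

    # The rule-specific key ("indegree") sits next to the rule name.
    conn_spec = {"rule": "fixed_indegree", "indegree": 5}
    syn_spec = {"synapse_model": "static_synapse", "weight": 2.5, "delay": 1.0}
    nest.Connect(pre, post, conn_spec, syn_spec)

    # Plain ID arrays with non-unique entries require the one_to_one rule
    # and an explicit synapse model.
    nest.Connect([1, 2, 2], [3, 4, 5], "one_to_one", {"synapse_model": "static_synapse"})
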
- processed_syn_spec = _process_syn_spec( - syn_spec, processed_conn_spec, len(pre), len(post), use_connect_arrays - ) - - # If pre and post are arrays of node IDs, and conn_spec is unspecified, - # the node IDs are connected one-to-one. - if use_connect_arrays: - if return_synapsecollection: - raise ValueError( - "SynapseCollection cannot be returned when connecting two arrays of node IDs" - ) - - if processed_syn_spec is None: - raise ValueError( - "When connecting two arrays of node IDs, the synapse specification dictionary must " - "be specified and contain at least the synapse model." - ) - - # In case of misspelling - if "weights" in processed_syn_spec: - raise ValueError("To specify weights, use 'weight' in syn_spec.") - if "delays" in processed_syn_spec: - raise ValueError("To specify delays, use 'delay' in syn_spec.") - - weights = ( - numpy.array(processed_syn_spec["weight"]) - if "weight" in processed_syn_spec - else None - ) - delays = ( - numpy.array(processed_syn_spec["delay"]) - if "delay" in processed_syn_spec - else None - ) - - try: - synapse_model = processed_syn_spec["synapse_model"] - except KeyError: - raise ValueError( - "When connecting two arrays of node IDs, the synapse specification dictionary must " - "contain a synapse model." - ) - - # Split remaining syn_spec entries to key and value arrays - reduced_processed_syn_spec = { - k: processed_syn_spec[k] - for k in set(processed_syn_spec.keys()).difference( - set(("weight", "delay", "synapse_model")) - ) - } - - if len(reduced_processed_syn_spec) > 0: - syn_param_keys = numpy.array( - list(reduced_processed_syn_spec.keys()), dtype=numpy.string_ - ) - syn_param_values = numpy.zeros([len(reduced_processed_syn_spec), len(pre)]) - - for i, value in enumerate(reduced_processed_syn_spec.values()): - syn_param_values[i] = value - else: - syn_param_keys = None - syn_param_values = None - - connect_arrays( - pre, post, weights, delays, synapse_model, syn_param_keys, syn_param_values - ) - return - - if not isinstance(pre, NodeCollection): - raise TypeError("Not implemented, presynaptic nodes must be a NodeCollection") - if not isinstance(post, NodeCollection): - raise TypeError("Not implemented, postsynaptic nodes must be a NodeCollection") - - # In some cases we must connect with ConnectLayers instead. - if _connect_layers_needed(processed_conn_spec, processed_syn_spec): - # Check that pre and post are layers - if pre.spatial is None: - raise TypeError("Presynaptic NodeCollection must have spatial information") - if post.spatial is None: - raise TypeError("Presynaptic NodeCollection must have spatial information") - - # Create the projection dictionary - spatial_projections = _process_spatial_projections( - processed_conn_spec, processed_syn_spec - ) - _connect_spatial(pre._datum, post._datum, spatial_projections) - else: - nestkernel.llapi_connect( - pre._datum, post._datum, processed_conn_spec, processed_syn_spec - ) - - if return_synapsecollection: - return GetConnections(pre, post) - - -def Disconnect(*args, conn_spec=None, syn_spec=None): - """Disconnect connections in a SynnapseCollection, or `pre` neurons from `post` neurons. - - When specifying `pre` and `post` nodes, they are disconnected using the specified disconnection - rule (one-to-one by default) and synapse type (:cpp:class:`static_synapse ` by default). - Details depend on the disconnection rule. 
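
Both calling conventions of `Disconnect`, sketched with example values:

    import nest

    pre = nest.Create("iaf_psc_alpha", 5)
    post = nest.Create("iaf_psc_alpha", 5)
    nest.Connect(pre, post, "one_to_one")

    # Either disconnect an explicit SynapseCollection ...
    nest.Disconnect(nest.GetConnections(pre, post))

    # ... or give pre and post with explicit specs (the defaults shown here).
    nest.Connect(pre, post, "one_to_one")
    nest.Disconnect(pre, post, conn_spec="one_to_one", syn_spec="static_synapse")
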
- - Parameters - ---------- - args : SynapseCollection or NodeCollections - Either a collection of connections to disconnect, or pre- and postsynaptic nodes given as `NodeCollection`s - conn_spec : str or dict - Disconnection rule when specifying pre- and postsynaptic nodes, see below - syn_spec : str or dict - Synapse specifications when specifying pre- and postsynaptic nodes, see below - - Notes - ------- - - **conn_spec** - - Apply the same rules as for connectivity specs in the :py:func:`.Connect` method - - Possible choices of the conn_spec are - :: - - 'one_to_one' - - 'all_to_all' - - **syn_spec** - - The synapse model and its properties can be specified either as a string naming - a synapse model (the list of all available synapse models can be gotten via - ``nest.synapse_models``) or as a dictionary as described below. - - Note that only the synapse type is checked when we disconnect and that if - `syn_spec` is given as a non-empty dictionary, the 'synapse_model' parameter must - be present. - - If no synapse model is specified the default model - :cpp:class:`static_synapse ` will be used. - - Available keys in the synapse dictionary are: - :: - - - 'synapse_model' - - 'weight' - - 'delay', - - 'receptor_type' - - parameters specific to the synapse model chosen - - 'synapse_model' determines the synapse type, taken from pre-defined synapse - types in NEST or manually specified synapses created via :py:func:`.CopyModel`. - - All other parameters are not currently implemented. - - Notes - ----- - `Disconnect` only disconnects explicitly specified nodes. - - """ - - if len(args) == 1: - synapsecollection = args[0] - if not isinstance(synapsecollection, SynapseCollection): - raise TypeError( - "Arguments must be either a SynapseCollection or two NodeCollections" - ) - if conn_spec is not None or syn_spec is not None: - raise ValueError( - "When disconnecting with a SynapseCollection, conn_spec and syn_spec cannot be specified" - ) - synapsecollection.disconnect() - elif len(args) == 2: - # Fill default values - conn_spec = "one_to_one" if conn_spec is None else conn_spec - syn_spec = "static_synapse" if syn_spec is None else syn_spec - if is_string(conn_spec): - conn_spec = {"rule": conn_spec} - if is_string(syn_spec): - syn_spec = {"synapse_model": syn_spec} - pre, post = args - if not isinstance(pre, NodeCollection) or not isinstance(post, NodeCollection): - raise TypeError( - "Arguments must be either a SynapseCollection or two NodeCollections" - ) - sps(pre) - sps(post) - sps(conn_spec) - sps(syn_spec) - sr("Disconnect_g_g_D_D") - else: - raise TypeError( - "Arguments must be either a SynapseCollection or two NodeCollections" - ) diff --git a/pynest/nest/lib/_hl_api_exceptions.py b/pynest/nest/lib/_hl_api_exceptions.py deleted file mode 100644 index cadd5797f9..0000000000 --- a/pynest/nest/lib/_hl_api_exceptions.py +++ /dev/null @@ -1,220 +0,0 @@ -# -*- coding: utf-8 -*- -# -# _hl_api_exceptions.py -# -# This file is part of NEST. -# -# Copyright (C) 2004 The NEST Initiative -# -# NEST is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 2 of the License, or -# (at your option) any later version. -# -# NEST is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with NEST. If not, see . - - -class NESTMappedException(type): - """Metaclass for exception namespace that dynamically creates exception classes. - - If a class (self) of this (meta)-type has an unknown attribute requested, __getattr__ defined - below gets called, creating a class with that name (the error name) and with an __init__ taking - commandname and errormessage (as created in the source) which is a closure on the parent and - errorname as well, with a parent of default type (self.default_parent) or - self.parents[errorname] if defined. """ - - def __getattr__(cls, errorname): - """Creates a class of type "errorname" which is a child of cls.default_parent or - cls.parents[errorname] if one is defined. - - This __getattr__ function also stores the class permanently as an attribute of cls for - re-use where cls is actually the class that triggered the getattr (the class that - NESTMappedException is a metaclass of). """ - - # Dynamic class construction, first check if we know its parent - if errorname in cls.parents: - parent = getattr(cls, cls.parents[errorname]) - else: # otherwise, get the default (SLIException) - parent = cls.default_parent - - # and now dynamically construct the new class - # not NESTMappedException, since that would mean the metaclass would let the new class inherit - # this __getattr__, allowing unintended dynamic construction of attributes - newclass = type( - cls.__name__ + '.' + errorname, - (parent,), - { - '__init__': cls.init(parent, errorname), - '__doc__': - """Dynamically created exception {} from {}. - - Created for the namespace: {}. - Parent exception: {}. - """.format(errorname, cls.source, cls.__name__, parent.__name__) - } - ) - - # Cache for reuse: __getattr__ should now not get called if requested again - setattr(cls, errorname, newclass) - - # And now we return the exception - return newclass - - -class NESTErrors(metaclass=NESTMappedException): - """Namespace for NEST exceptions, including dynamically created classes from SLI. - - Dynamic exception creation is through __getattr__ defined in the metaclass NESTMappedException. - """ - - class NESTError(Exception): - """Base exception class for all NEST exceptions. - """ - - def __init__(self, message): - """Initializer for NESTError base class. - - Parameters: - ----------- - message: str - full error message to report. - """ - - Exception.__init__(self, message) - self.message = message - - class SLIException(NESTError): - """Base class for all exceptions coming from sli. - """ - - def __init__(self, commandname, errormessage, errorname='SLIException'): - """Initialize function. - - Parameters: - ----------- - errorname: error name from SLI. - commandname: command name from SLI. - errormessage: message from SLI. - """ - message = "{} in PyNEST function {}: {}".format(errorname, commandname, errormessage) - NESTErrors.NESTError.__init__(self, message) - - self.errorname = errorname - self.commandname = commandname - self.errormessage = errormessage - - class PyNESTError(NESTError): - """Exceptions produced from Python/Cython code. - """ - pass - - @staticmethod - def init(parent, errorname): - """ Static class method to construct init's for SLIException children. - - Construct our new init with closure on errorname (as a default value) and parent. - The default value allows the __init__ to be chained and set by the leaf child. 
- This also moves the paramerization of __init__ away from the class construction logic - and next to the SLIException init. - - Parameters: - ---------- - parent: the ancestor of the class needed to properly walk up the MRO (not possible with super() or - super(type,...) because of the dynamic creation of the function - (used as a closure on the constructed __init__). - errorname: the class name for information purposes - internally (used as a closure on the constructed __init__). - """ - - def __init__(self, commandname, errormessage, errorname=errorname, *args, **kwargs): - # recursively init the parent class: all of this is only needed to properly set errorname - parent.__init__(self, commandname, errormessage, *args, errorname=errorname, **kwargs) - - docstring = \ - """Initialization function. - - Parameters: - ----------- - commandname: sli command name. - errormessage: sli error message. - errorname: set by default ("{}") or passed in by child (shouldn't be explicitly set - when creating an instance) - *args, **kwargs: passed through to base class. - - self will be a descendant of {}. - """.format(errorname, parent.__name__) - - try: - __init__.__doc__ = docstring - except AttributeError: - __init__.__func__.__doc__ = docstring - - return __init__ - - # source: the dynamically created exceptions come from SLI - # default_parent: the dynamically created exceptions are descended from SLIExcepton - # parents: unless they happen to be mapped in this list to another exception descended from SLIException - # these should be updated when new exceptions in sli are created that aren't directly descended - # from SLIException (but nothing bad will happen, it's just that otherwise they'll be directly - # descended from SLIException instead of an intermediate exception; they'll still be constructed - # and useable) - source = "SLI" - default_parent = SLIException - parents = { - 'TypeMismatch': 'InterpreterError', - 'SystemSignal': 'InterpreterError', - 'RangeCheck': 'InterpreterError', - 'ArgumentType': 'InterpreterError', - 'BadParameterValue': 'SLIException', - 'DictError': 'InterpreterError', - 'UndefinedName': 'DictError', - 'EntryTypeMismatch': 'DictError', - 'StackUnderflow': 'InterpreterError', - 'IOError': 'SLIException', - 'UnaccessedDictionaryEntry': 'DictError', - 'UnknownModelName': 'KernelException', - 'NewModelNameExists': 'KernelException', - 'ModelInUse': 'KernelException', - 'UnknownSynapseType': 'KernelException', - 'UnknownNode': 'KernelException', - 'NoThreadSiblingsAvailable': 'KernelException', - 'LocalNodeExpected': 'KernelException', - 'NodeWithProxiesExpected': 'KernelException', - 'UnknownReceptorType': 'KernelException', - 'IncompatibleReceptorType': 'KernelException', - 'UnknownPort': 'KernelException', - 'IllegalConnection': 'KernelException', - 'InexistentConnection': 'KernelException', - 'UnknownThread': 'KernelException', - 'BadDelay': 'KernelException', - 'UnexpectedEvent': 'KernelException', - 'UnsupportedEvent': 'KernelException', - 'BadProperty': 'KernelException', - 'BadParameter': 'KernelException', - 'DimensionMismatch': 'KernelException', - 'DistributionError': 'KernelException', - 'InvalidDefaultResolution': 'KernelException', - 'InvalidTimeInModel': 'KernelException', - 'StepMultipleRequired': 'KernelException', - 'TimeMultipleRequired': 'KernelException', - 'GSLSolverFailure': 'KernelException', - 'NumericalInstability': 'KernelException', - 'KeyError': 'KernelException', - 'MUSICPortUnconnected': 'KernelException', - 'MUSICPortHasNoWidth': 
'KernelException', - 'MUSICPortAlreadyPublished': 'KernelException', - 'MUSICSimulationHasRun': 'KernelException', - 'MUSICChannelUnknown': 'KernelException', - 'MUSICPortUnknown': 'KernelException', - 'MUSICChannelAlreadyMapped': 'KernelException' - } - - -# So we don't break any code that currently catches a nest.NESTError -NESTError = NESTErrors.NESTError diff --git a/pynest/nest/lib/_hl_api_helper.py b/pynest/nest/lib/_hl_api_helper.py deleted file mode 100644 index 425bbe6b38..0000000000 --- a/pynest/nest/lib/_hl_api_helper.py +++ /dev/null @@ -1,582 +0,0 @@ -# -*- coding: utf-8 -*- -# -# _hl_api_helper.py -# -# This file is part of NEST. -# -# Copyright (C) 2004 The NEST Initiative -# -# NEST is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 2 of the License, or -# (at your option) any later version. -# -# NEST is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with NEST. If not, see . - -""" -These are helper functions to ease the definition of the high-level -API of the PyNEST wrapper. -""" - -import warnings -import json -import functools -import textwrap -import subprocess -import os -import re -import shlex -import sys -import numpy -import pydoc - -from string import Template - -from .. import pynestkernel as kernel -from .. import nestkernel_api as nestkernel -import nest - -__all__ = [ - 'broadcast', - 'deprecated', - 'get_parameters', - 'get_parameters_hierarchical_addressing', - 'get_wrapped_text', - 'is_coercible_to_sli_array', - 'is_iterable', - 'is_sequence_of_connections', - 'is_sequence_of_node_ids', - 'load_help', - 'model_deprecation_warning', - 'restructure_data', - 'show_deprecation_warning', - 'show_help_with_pager', - 'SuppressedDeprecationWarning', - 'uni_str', -] - -# These flags are used to print deprecation warnings only once. -# Only flags for special cases need to be entered here, such as special models -# or function parameters, all flags for deprecated functions will be registered -# by the @deprecated decorator, and therefore does not manually need to be placed here. -_deprecation_warning = {'deprecated_model': {'deprecation_issued': False, - 'replacement': 'replacement_mod'}, - 'iaf_psc_alpha_canon': {'deprecation_issued': False, - 'replacement': 'iaf_psc_alpha_ps'}, - 'pp_pop_psc_delta': {'deprecation_issued': False, - 'replacement': 'gif_pop_psc_exp'}} - - -def format_Warning(message, category, filename, lineno, line=None): - """Formats deprecation warning.""" - - return '%s:%s: %s:%s\n' % (filename, lineno, category.__name__, message) - - -warnings.formatwarning = format_Warning - - -def get_wrapped_text(text, width=80): - """Formats a given multiline string to wrap at a given width, while - preserving newlines (and removing excessive whitespace). - - Parameters - ---------- - text : str - String to format - - Returns - ------- - str: - Wrapped string - """ - - lines = text.split("\n") - lines = [textwrap.fill(" ".join(line.split()), width=width) for line in lines] - return "\n".join(lines) - - -def show_deprecation_warning(func_name, alt_func_name=None, text=None): - """Shows a deprecation warning for a function. 
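
The practical effect of the exceptions namespace above: error classes are materialized by `NESTMappedException.__getattr__` on first attribute access, cached, and can then be caught like ordinary exceptions. A behavior sketch with a deliberately invalid model name:

    import nest

    try:
        nest.Create("no_such_model")
    except nest.NESTErrors.UnknownModelName as err:
        # The class was created on first access and reused afterwards.
        print(err.errorname, err.commandname, err.errormessage)
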
- - Parameters - ---------- - func_name : str - Name of the deprecated function - alt_func_name : str, optional - Name of the function to use instead. Needed if text=None - text : str, optional - Text to display instead of standard text - """ - if func_name in _deprecation_warning: - if not _deprecation_warning[func_name]['deprecation_issued']: - if text is None: - text = ("{0} is deprecated and will be removed in a future version of NEST.\n" - "Please use {1} instead!").format(func_name, alt_func_name) - text = get_wrapped_text(text) - - warnings.warn('\n' + text) # add LF so text starts on new line - _deprecation_warning[func_name]['deprecation_issued'] = True - - -# Since we need to pass extra arguments to the decorator, we need a -# decorator factory. See http://stackoverflow.com/questions/15564512 -def deprecated(alt_func_name, text=None): - """Decorator for deprecated functions. - - Shows a warning and calls the original function. - - Parameters - ---------- - alt_func_name : str, optional - Name of the function to use instead, may be empty string - text : str, optional - Text to display instead of standard text - - Returns - ------- - function: - Decorator function - """ - - def deprecated_decorator(func): - _deprecation_warning[func.__name__] = {'deprecation_issued': False} - - @functools.wraps(func) - def new_func(*args, **kwargs): - show_deprecation_warning(func.__name__, alt_func_name, text=text) - return func(*args, **kwargs) - return new_func - - return deprecated_decorator - - -def is_iterable(seq): - """Return True if the given object is an iterable, False otherwise. - - Parameters - ---------- - seq : object - Object to check - - Returns - ------- - bool: - True if object is an iterable - """ - - try: - iter(seq) - except TypeError: - return False - - return True - - -def is_coercible_to_sli_array(seq): - """Checks whether a given object is coercible to a SLI array - - Parameters - ---------- - seq : object - Object to check - - Returns - ------- - bool: - True if object is coercible to a SLI array - """ - - import sys - - if sys.version_info[0] >= 3: - return isinstance(seq, (tuple, list, range)) - else: - return isinstance(seq, (tuple, list, xrange)) - - -def is_sequence_of_connections(seq): - """Checks whether low-level API accepts seq as a sequence of - connections. - - Parameters - ---------- - seq : object - Object to check - - Returns - ------- - bool: - True if object is an iterable of dictionaries or - subscriptables of CONN_LEN - """ - - try: - cnn = next(iter(seq)) - return isinstance(cnn, dict) or len(cnn) == kernel.CONN_LEN - except TypeError: - pass - - return False - - -def is_sequence_of_node_ids(seq): - """Checks whether the argument is a potentially valid sequence of - node IDs (non-negative integers). - - Parameters - ---------- - seq : object - Object to check - - Returns - ------- - bool: - True if object is a potentially valid sequence of node IDs - """ - - return all(isinstance(n, int) and n >= 0 for n in seq) - - -def broadcast(item, length, allowed_types, name="item"): - """Broadcast item to given length. 
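-
-    For instance, ``broadcast(1.0, 3, (float,))`` returns ``(1.0, 1.0, 1.0)``,
-    while a list that already has the requested length is passed through
-    unchanged (an illustrative sketch of the behaviour implemented below).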
- - Parameters - ---------- - item : object - Object to broadcast - length : int - Length to broadcast to - allowed_types : list - List of allowed types - name : str, optional - Name of item - - Returns - ------- - object: - The original item broadcasted to sequence form of length - - Raises - ------ - TypeError - - - """ - - if isinstance(item, allowed_types): - return length * (item, ) - elif len(item) == 1: - return length * item - elif len(item) != length: - raise TypeError( - "'{0}' must be a single value, a list with one element or a list with {1} elements.".format(name, length)) - return item - - -def __show_help_in_modal_window(obj, help_text): - """Open modal window with help text - - Parameters - ---------- - obj : string - The filename of the help file - help_text : string - Full help_text - """ - - help_text = json.dumps(help_text) - style = "" - s = Template(""" - require( - ["base/js/dialog"], - function(dialog) { - dialog.modal({ - title: '$jstitle', - body: $jstext, - buttons: { - 'close': {} - } - }); - } - ); - """) - - from IPython.display import HTML, Javascript, display - display(HTML(style)) - display(Javascript(s.substitute(jstitle=obj, jstext=help_text))) - - -def get_help_fname(obj): - """Get file name for help object - - Raises FileNotFound if no help is available for ``obj``. - - Parameters - ---------- - obj : string - Object to get help filename for - - Returns - ------- - string: - File name of the help text for obj - """ - - docdir = sli_func("statusdict/prgdocdir ::") - help_fname = os.path.join(docdir, 'html', 'models', f'{obj}.rst') - - if os.path.isfile(help_fname): - return help_fname - else: - raise FileNotFoundError(f"Sorry, there is no help for '{obj}'.") - - -def load_help(obj): - """Returns documentation of the given object in RST format - - Parameters - ---------- - obj : string - Object to display help for - - Returns - ------- - string: - The documentation of the object or None if no help is available - """ - - help_fname = get_help_fname(obj) - with open(help_fname, 'r', encoding='utf-8') as help_file: - help_text = help_file.read() - return help_text - - -def show_help_with_pager(obj): - """Display help text for the given object in the Python pager - - If called from within a Jupyter notebook, display help in a modal - window instead of in the pager. - - Parameters - ---------- - obj : object - Object to display - - """ - - def check_nb(): - try: - return get_ipython().__class__.__name__.startswith('ZMQ') - except NameError: - return False - - help_text = load_help(obj) - - if check_nb(): - __show_help_in_modal_window(obj + '.rst', help_text) - return - - pydoc.pager(help_text) - - -def __is_executable(path, candidate): - """Returns true for executable files.""" - - candidate = os.path.join(path, candidate) - return os.access(candidate, os.X_OK) and os.path.isfile(candidate) - - -def model_deprecation_warning(model): - """Checks whether the model is to be removed in a future version of NEST. - If so, a deprecation warning is issued. 
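-
-    The warning is issued at most once per model, tracked via the module-level
-    ``_deprecation_warning`` registry.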
- - Parameters - ---------- - model: str - Name of model - """ - - if model in _deprecation_warning: - if not _deprecation_warning[model]['deprecation_issued']: - text = ("The {0} model is deprecated and will be removed in a future version of NEST, " - "use {1} instead.").format(model, _deprecation_warning[model]['replacement']) - show_deprecation_warning(model, text=text) - - -def restructure_data(result, keys): - """ - Restructure list of status dictionaries or list of parameter values to dict with lists or single list or int. - - Parameters - ---------- - result: list - list of status dictionaries or list (of lists) of parameter values. - keys: string or list of strings - name(s) of properties - - Returns - ------- - int, list or dict - """ - - if isinstance(keys, str): - if len(result) != 1: - all_keys = sorted({key for result_dict in result for key in result_dict}) - final_result = [] - - for result_dict in result: - if keys in result_dict.keys(): - final_result.append(result_dict[keys]) - elif keys in all_keys: - final_result.append(None) - final_result = tuple(final_result) - else: - final_result = result[0][keys] - - elif is_iterable(keys): - final_result = ({key: [val[i] for val in result] - for i, key in enumerate(keys)} if len(result) != 1 - else {key: val[i] for val in result - for i, key in enumerate(keys)}) - - elif keys is None: - if len(result) != 1: - all_keys = sorted({key for result_dict in result for key in result_dict}) - final_result = {} - - for key in all_keys: - final_result[key] = [] - for result_dict in result: - if key in result_dict.keys(): - final_result[key].append(result_dict[key]) - else: - final_result[key].append(None) - else: - final_result = {key: result_dict[key] for result_dict in result for key in result[0]} - return final_result - - -def get_parameters(nc, param): - """ - Get parameters from nodes. - - Used by NodeCollections `get()` function. - - Parameters - ---------- - nc: NodeCollection - nodes to get values from - param: string or list of strings - string or list of string naming model properties. - - Returns - ------- - int, list: - param is a string so the value(s) is returned - dict: - param is a list of string so a dictionary is returned - """ - # param is single literal - if isinstance(param, str): - result = nestkernel.llapi_get_nc_status(nc._datum, param) - elif is_iterable(param): - result = {param_name: get_parameters(nc, param_name) for param_name in param} - else: - raise TypeError("Params should be either a string or an iterable") - - return result - - -def get_parameters_hierarchical_addressing(nc, params): - """ - Get parameters from nodes, hierarchical case. - - Used by NodeCollections `get()` function. - - Parameters - ---------- - nc: NodeCollection - nodes to get values from - params: tuple - first value in the tuple should be a string, second can be a string or a list of string. - The first value corresponds to the path into the hierarchical structure - while the second value corresponds to the name(s) of the desired - properties. - - Returns - ------- - int, list: - params[-1] is a string so the value(s) is returned - dict: - params[-1] is a list of string so a dictionary is returned - """ - - # Right now, NEST only allows get(arg0, arg1) for hierarchical - # addressing, where arg0 must be a string and arg1 can be string - # or list of strings. 
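-    # Illustrative sketch: a call like nrns.get("events", ["senders", "times"])
-    # first fetches the "events" sub-dictionary of every node (params[0]) and
-    # then picks the properties named in params[-1] out of it.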
-    if isinstance(params[0], str):
-        value_list = nc.get(params[0])
-        if type(value_list) != tuple:
-            value_list = (value_list,)
-    else:
-        raise TypeError('First argument must be a string, specifying path into hierarchical dictionary')
-
-    result = restructure_data(value_list, None)
-
-    if isinstance(params[-1], str):
-        result = result[params[-1]]
-    else:
-        result = {key: result[key] for key in params[-1]}
-    return result
-
-
-class SuppressedDeprecationWarning:
-    """
-    Context manager turning off deprecation warnings for given methods.
-
-    Think thoroughly before use. This context should only be used as a way to
-    make sure examples do not display deprecation warnings, that is, used in
-    functions called from examples, and not as a way to make tedious
-    deprecation warnings disappear.
-    """
-
-    def __init__(self, no_dep_funcs):
-        """
-        Parameters
-        ----------
-        no_dep_funcs: Function name (string) or iterable of function names
-                      for which to suppress deprecation warnings
-        """
-
-        self._no_dep_funcs = (no_dep_funcs if not isinstance(no_dep_funcs, str) else (no_dep_funcs, ))
-        self._deprecation_status = {}
-        sr('verbosity')  # Use sli-version as we cannot import from info because of circular inclusion problem
-        self._verbosity_level = spp()
-
-    def __enter__(self):
-
-        for func_name in self._no_dep_funcs:
-            self._deprecation_status[func_name] = _deprecation_warning[func_name]  # noqa
-            _deprecation_warning[func_name]['deprecation_issued'] = True
-
-            # Suppress only if verbosity level is deprecated or lower
-            if self._verbosity_level <= sli_func('M_DEPRECATED'):
-                # Use sli-version as we cannot import from info because of circular inclusion problem
-                sr("{} setverbosity".format(sli_func('M_WARNING')))
-
-    def __exit__(self, *args):
-
-        # Reset the verbosity level and deprecation warning status
-        sr("{} setverbosity".format((self._verbosity_level)))
-
-        for func_name, deprec_dict in self._deprecation_status.items():
-            _deprecation_warning[func_name]['deprecation_issued'] = (
-                deprec_dict['deprecation_issued'])
diff --git a/pynest/nest/lib/_hl_api_info.py b/pynest/nest/lib/_hl_api_info.py
deleted file mode 100644
index 3b9a0c8d1d..0000000000
--- a/pynest/nest/lib/_hl_api_info.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# _hl_api_info.py
-#
-# This file is part of NEST.
-#
-# Copyright (C) 2004 The NEST Initiative
-#
-# NEST is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or
-# (at your option) any later version.
-#
-# NEST is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with NEST.  If not, see <http://www.gnu.org/licenses/>.
-
-"""
-Functions to get information on NEST.
-"""
-
-import sys
-import os
-import textwrap
-import webbrowser
-
-from ._hl_api_helper import broadcast, is_iterable, load_help, show_help_with_pager
-from ._hl_api_types import to_json
-from .. import nestkernel_api as nestkernel
-import nest
-
-__all__ = [
-    'authors',
-    'get_argv',
-    'get_verbosity',
-    'help',
-    'helpdesk',
-    'message',
-    'set_verbosity',
-    'sysinfo',
-]
-
-
-def sysinfo():
-    """Print information on the platform on which NEST was compiled.
-
-    """
-
-    sr("sysinfo")
-
-
-def authors():
-    """Print the authors of NEST.
- - """ - - sr("authors") - - -def helpdesk(): - """Open the NEST documentation index in a browser. - - This command opens the NEST documentation index page using the - system's default browser. - - Please note that the help pages will only be available if you ran - ``make html`` prior to installing NEST. For more details, see - :ref:`doc_workflow`. - - """ - - docdir = sli_func("statusdict/prgdocdir ::") - help_fname = os.path.join(docdir, 'html', 'index.html') - - if not os.path.isfile(help_fname): - msg = "Sorry, the help index cannot be opened. " - msg += "Did you run 'make html' before running 'make install'?" - raise FileNotFoundError(msg) - - webbrowser.open_new(f"file://{help_fname}") - - -def help(obj=None, return_text=False): - """Display the help page for the given object in a pager. - - If ``return_text`` is omitted or explicitly given as ``False``, - this command opens the help text for ``object`` in the default - pager using the ``pydoc`` module. - - If ``return_text`` is ``True``, the help text is returned as a - string in reStructuredText format instead of displaying it. - - Parameters - ---------- - obj : object, optional - Object to display help for - return_text : bool, optional - Option for returning the help text - - Returns - ------- - None or str - The help text of the object if `return_text` is `True`. - - """ - - if obj is not None: - try: - if return_text: - return load_help(obj) - else: - show_help_with_pager(obj) - except FileNotFoundError: - print(textwrap.dedent(f""" - Sorry, there is no help for model '{obj}'. - Use the Python help() function to obtain help on PyNEST functions.""")) - else: - print(nest.__doc__) - - -def get_argv(): - """Return argv as seen by NEST. - - This is similar to Python :code:`sys.argv` but might have changed after - MPI initialization. - - Returns - ------- - tuple - Argv, as seen by NEST - - """ - - sr('statusdict') - statusdict = spp() - return statusdict['argv'] - - -def message(level, sender, text): - """Print a message using message system of NEST. - - Parameters - ---------- - level : - Level - sender : - Message sender - text : str - Text to be sent in the message - - """ - - sps(level) - sps(sender) - sps(text) - sr('message') - - -def get_verbosity(): - """Return verbosity level of NEST's messages. - - - M_ALL=0, display all messages - - M_INFO=10, display information messages and above - - M_DEPRECATED=18, display deprecation warnings and above - - M_WARNING=20, display warning messages and above - - M_ERROR=30, display error messages and above - - M_FATAL=40, display failure messages and above - - Returns - ------- - int: - The current verbosity level - """ - - sr('verbosity') - return spp() - - -def set_verbosity(level): - """Change verbosity level for NEST's messages. - - - M_ALL=0, display all messages - - M_INFO=10, display information messages and above - - M_DEPRECATED=18, display deprecation warnings and above - - M_WARNING=20, display warning messages and above - - M_ERROR=30, display error messages and above - - M_FATAL=40, display failure messages and above - - .. note:: - - To suppress the usual output when NEST starts up (e.g., the welcome message and - version information), you can run ``export PYNEST_QUIET=1`` on the command - line before executing your simulation script. - - Parameters - ---------- - level : str, default: 'M_INFO' - Can be one of 'M_FATAL', 'M_ERROR', 'M_WARNING', 'M_DEPRECATED', - 'M_INFO' or 'M_ALL'. 
- """ - - # TODO-PYNEST-NG: There are no SLI messages anymore, so verbosity - # is now irrelevant and should be replaced when a - # replacement for message() exists. - - # sr("{} setverbosity".format(level)) - pass diff --git a/pynest/nest/lib/_hl_api_models.py b/pynest/nest/lib/_hl_api_models.py deleted file mode 100644 index 468bc7c7dc..0000000000 --- a/pynest/nest/lib/_hl_api_models.py +++ /dev/null @@ -1,198 +0,0 @@ -# -*- coding: utf-8 -*- -# -# _hl_api_models.py -# -# This file is part of NEST. -# -# Copyright (C) 2004 The NEST Initiative -# -# NEST is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 2 of the License, or -# (at your option) any later version. -# -# NEST is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with NEST. If not, see . - -""" -Functions for model handling -""" - -from .._ll_api import * -from .. import nestkernel_api as nestkernel -from ._hl_api_helper import deprecated, is_iterable, model_deprecation_warning -from ._hl_api_types import to_json - -__all__ = [ - "ConnectionRules", - "CopyModel", - "GetDefaults", - "Models", - "SetDefaults", -] - - -@deprecated("nest.node_models or nest.synapse_models") -@check_stack -def Models(mtype="all", sel=None): - """Return a tuple of neuron, device, or synapse model names. - - Parameters - ---------- - mtype : str, optional - Use ``mtype='nodes'`` to only get neuron and device models, - or ``mtype='synapses'`` to only get synapse models. - sel : str, optional - Filter results and only return models containing ``sel``. - - Returns - ------- - tuple - Available model names, sorted by name - - Raises - ------ - ValueError - Description - - Notes - ----- - - Synapse model names ending in ``_hpc`` require less memory because of - thread-local indices for target neuron IDs and fixed ``rport``s of 0. - - Synapse model names ending in ``_lbl`` allow to assign an integer label - (``synapse_label``) to each individual synapse, at the cost of increased - memory requirements. - - """ - - if mtype not in ("all", "nodes", "synapses"): - raise ValueError("mtype has to be one of 'all', 'nodes', or 'synapses'") - - models = [] - - if mtype in ("all", "nodes"): - models += GetKernelStatus("node_models") - - if mtype in ("all", "synapses"): - models += GetKernelStatus("synapse_models") - - if sel is not None: - models = [x for x in models if sel in x] - - models.sort() - - return tuple(models) - - -@deprecated("nest.connection_rules") -@check_stack -def ConnectionRules(): - """Return a tuple of all available connection rules, sorted by name. - - Returns - ------- - tuple - Available connection rules, sorted by name - - """ - - return tuple(sorted(GetKernelStatus("connection_rules"))) - - -@check_stack -def SetDefaults(model, params, val=None): - """Set defaults for the given model or recording backend. - - New default values are used for all subsequently created instances - of the model. - - Parameters - ---------- - model : str - Name of the model or recording backend - params : str or dict - Dictionary of new default parameter values - val : str, optional - If given, ``params`` has to be the name of a parameter. 
-
-    """
-
-    if val is not None:
-        if isinstance(params, str):
-            params = {params: val}
-
-    nestkernel.llapi_set_defaults(model, params)
-
-
-@check_stack
-def GetDefaults(model, keys=None, output=""):
-    """Return defaults of the given model or recording backend.
-
-    Parameters
-    ----------
-    model : str
-        Name of the model or recording backend
-    keys : str or list, optional
-        String or a list of strings naming model properties. `GetDefaults` then
-        returns a single value or a list of values belonging to the keys
-        given.
-    output : str, optional
-        Whether the returned data should be in a JSON format
-        (``output='json'``). Default is ''.
-
-    Returns
-    -------
-    dict
-        A dictionary of default parameters.
-    type
-        If keys is a string, the corresponding default parameter is returned.
-    list
-        If keys is a list of strings, a list of corresponding default parameters
-        is returned.
-    str :
-        If `output` is ``json``, returns parameters in JSON format.
-
-    Raises
-    ------
-    TypeError
-
-    """
-
-    result = nestkernel.llapi_get_defaults(model)
-
-    if keys is not None:
-        if is_iterable(keys) and not isinstance(keys, str):
-            result = [result[key] for key in keys]
-        else:
-            result = result[keys]
-
-    if output == "json":
-        result = to_json(result)
-
-    return result
-
-
-@check_stack
-def CopyModel(existing, new, params=None):
-    """Create a new model by copying an existing one.
-
-    Parameters
-    ----------
-    existing : str
-        Name of existing model
-    new : str
-        Name of the copied model
-    params : dict, optional
-        Default parameters assigned to the copy. Not provided parameters are
-        taken from the existing model.
-
-    """
-
-    model_deprecation_warning(existing)
-
-    nestkernel.llapi_copy_model(existing, new, {} if params is None else params)
diff --git a/pynest/nest/lib/_hl_api_nodes.py b/pynest/nest/lib/_hl_api_nodes.py
deleted file mode 100644
index 8da5ed0bce..0000000000
--- a/pynest/nest/lib/_hl_api_nodes.py
+++ /dev/null
@@ -1,213 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# _hl_api_nodes.py
-#
-# This file is part of NEST.
-#
-# Copyright (C) 2004 The NEST Initiative
-#
-# NEST is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or
-# (at your option) any later version.
-#
-# NEST is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with NEST.  If not, see <http://www.gnu.org/licenses/>.
-
-"""
-Functions for node handling
-"""
-
-import warnings
-
-import nest
-from .._ll_api import *
-from .. import pynestkernel as kernel
-from .. import nestkernel_api as nestkernel
-from ._hl_api_helper import is_iterable, model_deprecation_warning
-from ._hl_api_types import NodeCollection, Parameter
-
-__all__ = [
-    "Create",
-    "GetLocalNodeCollection",
-    "GetNodes",
-    "PrintNodes",
-]
-
-
-def Create(model, n=1, params=None, positions=None):
-    """Create one or more nodes.
-
-    Generates `n` new network objects of the supplied model type. If `n` is not
-    given, a single node is created. Note that if setting parameters of the
-    nodes fails, the nodes will still have been created.
-
-    Note
-    ----
-    During network construction, create all nodes representing model neurons first, then all nodes
-    representing devices (generators, recorders, or detectors), or all devices first and then all neurons.
-    Otherwise, network connection can be slow, especially in parallel simulations of networks
-    with many devices.
-
-    Parameters
-    ----------
-    model : str
-        Name of the model to create
-    n : int, optional
-        Number of nodes to create
-    params : dict or list, optional
-        Parameters for the new nodes. Can be any of the following:
-
-        - A dictionary with either single values or lists of size n.
-          The single values will be applied to all nodes, while the lists will be distributed across
-          the nodes. Both single values and lists can be given at the same time.
-        - A list with n dictionaries, one dictionary for each node.
-        Values may be :py:class:`.Parameter` objects. If omitted,
-        the model's defaults are used.
-    positions: :py:class:`.spatial.grid` or :py:class:`.spatial.free` object, optional
-        Object describing spatial positions of the nodes. If omitted, the nodes have no spatial attachment.
-
-    Returns
-    -------
-    NodeCollection:
-        Object representing the IDs of created nodes, see :py:class:`.NodeCollection` for more.
-
-    Raises
-    ------
-    NESTError
-        If setting node parameters fails. However, the nodes will still have
-        been created.
-    TypeError
-        If the positions object is of wrong type.
-    """
-
-    model_deprecation_warning(model)
-
-    # If any of the elements in the parameter dictionary is either an array-like object,
-    # or a NEST parameter, we create the nodes first, then set the given values. If not,
-    # we can pass the parameter specification to SLI when the nodes are created.
-    iterable_or_parameter_in_params = True
-
-    if not isinstance(n, int):
-        raise TypeError("n must be an integer")
-
-    # PYNEST-NG: can we support the use case above by passing the dict into ll_create?
-    if isinstance(params, dict) and params:  # if params is a dict and not empty
-        iterable_or_parameter_in_params = any(
-            is_iterable(v) or isinstance(v, Parameter) for k, v in params.items()
-        )
-
-    if positions is not None:
-        # Explicitly retrieve lazy loaded spatial property from the module class.
-        # This is needed because the automatic lookup fails. See #2135.
-        spatial = getattr(nest.NestModule, "spatial")
-        # We only accept positions as either a free object or a grid object.
-        if not isinstance(positions, (spatial.free, spatial.grid)):
-            raise TypeError(
-                "`positions` must be either a nest.spatial.free or a nest.spatial.grid object"
-            )
-        layer_specs = {"elements": model}
-        layer_specs["edge_wrap"] = positions.edge_wrap
-        if isinstance(positions, spatial.free):
-            layer_specs["positions"] = positions.pos
-            # If the positions are based on a parameter object, the number of nodes must be specified.
-            if isinstance(positions.pos, Parameter):
-                layer_specs["n"] = n
-        else:
-            # If positions is not a free object, it must be a grid object.
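-            # Note: with grid positions the number of nodes is implied by the
-            # grid shape (e.g. shape=[5, 5] yields 25 nodes), so an explicit
-            # n > 1 would be ambiguous and is rejected here.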
- if n > 1: - raise kernel.NESTError( - "Cannot specify number of nodes with grid positions" - ) - layer_specs["shape"] = positions.shape - if positions.center is not None: - layer_specs["center"] = positions.center - if positions.extent is not None: - layer_specs["extent"] = positions.extent - - layer = nestkernel.llapi_create_spatial(layer_specs) - layer.set(params if params else {}) - return layer - - node_ids = nestkernel.llapi_create(model, n) - - if isinstance(params, dict) and params: # if params is a dict and not empty - try: - node_ids.set(params) - except Exception: - warnings.warn( - "Setting node parameters failed, but nodes have already been " - + f"created! The node IDs of the new nodes are: {node_ids}." - ) - raise - - return node_ids - - -def PrintNodes(): - """Print the `node ID` ranges and `model names` of all the nodes in the network.""" - - print(nestkernel.llapi_print_nodes()) - - -def GetNodes(properties={}, local_only=False): - """Return all nodes with the given properties as `NodeCollection`. - - Parameters - ---------- - properties : dict, optional - Only node IDs of nodes matching the properties given in the - dictionary exactly will be returned. Matching properties with float - values (e.g. the membrane potential) may fail due to tiny numerical - discrepancies and should be avoided. Note that when a params dict is - present, thread parallelization is not possible, the function will - be run thread serial. - local_only : bool, optional - If True, only node IDs of nodes simulated on the local MPI process will - be returned. By default, node IDs of nodes in the entire simulation - will be returned. This requires MPI communication and may slow down - the script. - - Returns - ------- - NodeCollection: - `NodeCollection` of nodes - """ - - return nestkernel.llapi_get_nodes(properties, local_only) - - -def GetLocalNodeCollection(nc): - """Get local nodes of a `NodeCollection` as a new `NodeCollection`. - - This function returns the local nodes of a `NodeCollection`. If there are no - local elements, an empty `NodeCollection` is returned. - - Parameters - ---------- - nc: NodeCollection - `NodeCollection` for which to get local nodes - - Returns - ------- - NodeCollection: - Object representing the local nodes of the given `NodeCollection` - """ - if not isinstance(nc, NodeCollection): - raise TypeError( - "GetLocalNodeCollection requires a NodeCollection in order to run" - ) - - rank = Rank() - num_procs = NumProcesses() - first_in_nc = nc[0].global_id - first_index = ((rank - first_in_nc % num_procs) + num_procs) % num_procs - if first_index <= len(nc): - return nc[first_index : len(nc) : num_procs] - else: - return NodeCollection([]) diff --git a/pynest/nest/lib/_hl_api_parallel_computing.py b/pynest/nest/lib/_hl_api_parallel_computing.py deleted file mode 100644 index 608268292c..0000000000 --- a/pynest/nest/lib/_hl_api_parallel_computing.py +++ /dev/null @@ -1,125 +0,0 @@ -# -*- coding: utf-8 -*- -# -# _hl_api_parallel_computing.py -# -# This file is part of NEST. -# -# Copyright (C) 2004 The NEST Initiative -# -# NEST is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 2 of the License, or -# (at your option) any later version. -# -# NEST is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with NEST. If not, see . - -""" -Functions for parallel computing -""" - -from .._ll_api import * -from .. import pynestkernel as kernel -from .. import nestkernel_api as nestkernel - -__all__ = [ - "NumProcesses", - "Rank", - "GetLocalVPs", - "SetAcceptableLatency", - "SetMaxBuffered", - "SyncProcesses", -] - - -@check_stack -def Rank(): - """Return the MPI rank of the local process. - - Returns - ------- - int: - MPI rank of the local process - - Note - ---- - DO NOT USE `Rank()` TO EXECUTE ANY FUNCTION IMPORTED FROM THE `nest` - MODULE ON A SUBSET OF RANKS IN AN MPI-PARALLEL SIMULATION. - - This will lead to unpredictable behavior. Symptoms may be an - error message about non-synchronous global random number generators - or deadlocks during simulation. In the worst case, the simulation - may complete but generate nonsensical results. - """ - - return nestkernel.llapi_get_rank() - - -@check_stack -def NumProcesses(): - """Return the overall number of MPI processes. - - Returns - ------- - int: - Number of overall MPI processes - """ - - return nestkernel.llapi_get_num_mpi_processes() - - -@check_stack -def SetAcceptableLatency(port_name, latency): - """Set the acceptable `latency` (in ms) for a MUSIC port. - - Parameters - ---------- - port_name : str - MUSIC port to set latency for - latency : float - Latency in ms - """ - - sps(kernel.SLILiteral(port_name)) - sps(latency) - sr("SetAcceptableLatency") - - -@check_stack -def SetMaxBuffered(port_name, size): - """Set the maximum buffer size for a MUSIC port. - - Parameters - ---------- - port_name : str - MUSIC port to set buffer size for - size : int - Buffer size - """ - - sps(kernel.SLILiteral(port_name)) - sps(size) - sr("SetMaxBuffered") - - -@check_stack -def SyncProcesses(): - """Synchronize all MPI processes.""" - - sr("SyncProcesses") - - -@check_stack -def GetLocalVPs(): - """Return iterable representing the VPs local to the MPI rank.""" - - # Compute local VPs as range based on round-robin logic in - # VPManager::get_vp(). mpitest_get_local_vps ensures this is in - # sync with the kernel. - n_vp = sli_func("GetKernelStatus /total_num_virtual_procs get") - return range(Rank(), n_vp, NumProcesses()) diff --git a/pynest/nest/lib/_hl_api_simulation.py b/pynest/nest/lib/_hl_api_simulation.py deleted file mode 100644 index c75a4ce10e..0000000000 --- a/pynest/nest/lib/_hl_api_simulation.py +++ /dev/null @@ -1,345 +0,0 @@ -# -*- coding: utf-8 -*- -# -# _hl_api_simulation.py -# -# This file is part of NEST. -# -# Copyright (C) 2004 The NEST Initiative -# -# NEST is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 2 of the License, or -# (at your option) any later version. -# -# NEST is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with NEST. If not, see . - -""" -Functions for simulation control -""" - -from contextlib import contextmanager -import warnings - -from .. import pynestkernel as kernel -from .. 
import nestkernel_api as nestkernel
-
-from .._ll_api import *
-from ._hl_api_helper import is_iterable
-from ._hl_api_parallel_computing import Rank
-
-__all__ = [
-    "Cleanup",
-    "DisableStructuralPlasticity",
-    "EnableStructuralPlasticity",
-    "GetKernelStatus",
-    "Install",
-    "Prepare",
-    "ResetKernel",
-    "Run",
-    "RunManager",
-    "SetKernelStatus",
-    "Simulate",
-]
-
-
-@check_stack
-def Simulate(t):
-    """Simulate the network for `t` milliseconds.
-
-    Parameters
-    ----------
-    t : float
-        Time to simulate in ms
-
-    See Also
-    --------
-    RunManager
-
-    """
-
-    nestkernel.llapi_simulate(t)
-
-
-@check_stack
-def Run(t):
-    """Simulate the network for `t` milliseconds.
-
-    Parameters
-    ----------
-    t : float
-        Time to simulate in ms
-
-    Notes
-    -----
-
-    Call between `Prepare` and `Cleanup` calls, or within a
-    ``with RunManager`` clause.
-
-    Simulate(t): t' = t/m; Prepare(); for _ in range(m): Run(t'); Cleanup()
-
-    `Prepare` must be called before `Run` to calibrate the system, and
-    `Cleanup` must be called after `Run` to close files, cleanup handles, and
-    so on. After `Cleanup`, `Prepare` can and must be called before more `Run`
-    calls.
-
-    Be careful about modifying the network or neurons between `Prepare` and `Cleanup`
-    calls. In particular, do not call `Create`, `Connect`, or `SetKernelStatus`.
-    Changing the membrane potential `V_m` of neurons or synaptic weights (but not delays!)
-    will in most cases work as expected, while changing membrane or synaptic time
-    constants will not work correctly. If in doubt, assume that changes may cause
-    undefined behavior and check these thoroughly.
-
-    Also note that `local_spike_counter` is reset each time you call `Run`.
-
-    See Also
-    --------
-    Prepare, Cleanup, RunManager, Simulate
-
-    """
-    nestkernel.llapi_run(t)
-
-
-@check_stack
-def Prepare():
-    """Calibrate the system before a `Run` call. Not needed for `Simulate`.
-
-    See Also
-    --------
-    Run, Cleanup
-
-    """
-    nestkernel.llapi_prepare()
-
-
-@check_stack
-def Cleanup():
-    """Cleans up resources after a `Run` call. Not needed for `Simulate`.
-
-    Closes state for a series of runs, such as flushing and closing files.
-    A `Prepare` is needed after a `Cleanup` before any more calls to `Run`.
-
-    See Also
-    --------
-    Run, Prepare
-
-    """
-    nestkernel.llapi_cleanup()
-
-
-@contextmanager
-def RunManager():
-    """ContextManager for `Run`
-
-    Calls `Prepare` before a series of `Run` calls, and calls `Cleanup` at end.
-
-    For example:
-
-    ::
-
-        with RunManager():
-            for _ in range(10):
-                Run(100)
-            # extract results
-
-    Notes
-    -----
-    Be careful about modifying the network or neurons between `Prepare` and `Cleanup`
-    calls. In particular, do not call `Create`, `Connect`, or `SetKernelStatus`.
-    Changing the membrane potential `V_m` of neurons or synaptic weights (but not delays!)
-    will in most cases work as expected, while changing membrane or synaptic time
-    constants will not work correctly. If in doubt, assume that changes may cause
-    undefined behavior and check these thoroughly.
-
-    See Also
-    --------
-    Prepare, Run, Cleanup, Simulate
-
-    """
-
-    Prepare()
-    try:
-        yield
-    finally:
-        Cleanup()
-
-
-@check_stack
-def ResetKernel():
-    """Reset the simulation kernel.
-
-    This will destroy the network as well as all custom models created with
-    :py:func:`.CopyModel`. Calling this function is equivalent to restarting NEST.
- - In particular, - - * all network nodes - * all connections - * all user-defined neuron and synapse models - are deleted, and - - * time - * random generators - are reset. The only exception is that dynamically loaded modules are not - unloaded. This may change in a future version of NEST. - - """ - nestkernel.llapi_reset_kernel() - - -@check_stack -def SetKernelStatus(params): - """Set parameters for the simulation kernel. - - See the documentation of :ref:`sec:kernel_attributes` for a valid - list of params. - - Parameters - ---------- - - params : dict - Dictionary of parameters to set. - - See Also - -------- - - GetKernelStatus - - """ - # We need the nest module to be fully initialized in order to access the - # _kernel_attr_names and _readonly_kernel_attrs. As hl_api_simulation is - # imported during nest module initialization, we can't put the import on - # the module level, but have to have it on the function level. - import nest # noqa - - # TODO-PYNEST-NG: Enable again when KernelAttribute works - raise_errors = params.get("dict_miss_is_error", nest.dict_miss_is_error) - valids = nest._kernel_attr_names - readonly = nest._readonly_kernel_attrs - keys = list(params.keys()) - for key in keys: - msg = None - if key not in valids: - msg = ( - f"`{key}` is not a valid kernel parameter, " - + "valid parameters are: " - + ", ".join(f"'{p}'" for p in sorted(valids)) - ) - elif key in readonly: - msg = f"`{key}` is a readonly kernel parameter" - if msg is not None: - if raise_errors: - raise ValueError(msg) - else: - warnings.warn(msg + f" \n`{key}` has been ignored") - del params[key] - - nestkernel.llapi_set_kernel_status(params) - - -@check_stack -def GetKernelStatus(keys=None): - """Obtain parameters of the simulation kernel. - - Parameters - ---------- - - keys : str or list, optional - Single parameter name or `list` of parameter names - - Returns - ------- - - dict: - Parameter dictionary, if called without argument - type: - Single parameter value, if called with single parameter name - list: - List of parameter values, if called with list of parameter names - - Raises - ------ - - TypeError - If `keys` are of the wrong type. - - Notes - ----- - See SetKernelStatus for documentation on each parameter key. - - See Also - -------- - SetKernelStatus - - """ - - status_root = nestkernel.llapi_get_kernel_status() - - if keys is None: - return status_root - elif isinstance(keys, str): - return status_root[keys] - elif is_iterable(keys): - return tuple(status_root[k] for k in keys) - else: - raise TypeError("keys should be either a string or an iterable") - - -@check_stack -def Install(module_name): - """Load a dynamically linked NEST module. - - Parameters - ---------- - module_name : str - Name of the dynamically linked module - - Returns - ------- - handle - NEST module identifier, required for unloading - - Notes - ----- - Dynamically linked modules are searched in the NEST library - directory (``/lib/nest``) and in ``LD_LIBRARY_PATH`` (on - Linux) or ``DYLD_LIBRARY_PATH`` (on OSX). 
- - **Example** - :: - - nest.Install("mymodule") - - """ - - return sr("(%s) Install" % module_name) - - -@check_stack -def EnableStructuralPlasticity(): - """Enable structural plasticity for the network simulation - - See Also - -------- - DisableStructuralPlasticity - - """ - - sr("EnableStructuralPlasticity") - - -@check_stack -def DisableStructuralPlasticity(): - """Disable structural plasticity for the network simulation - - See Also - -------- - EnableStructuralPlasticity - - """ - sr("DisableStructuralPlasticity") diff --git a/pynest/nest/lib/_hl_api_spatial.py b/pynest/nest/lib/_hl_api_spatial.py deleted file mode 100644 index 6d474efb7c..0000000000 --- a/pynest/nest/lib/_hl_api_spatial.py +++ /dev/null @@ -1,1607 +0,0 @@ -# -*- coding: utf-8 -*- -# -# _hl_api_spatial.py -# -# This file is part of NEST. -# -# Copyright (C) 2004 The NEST Initiative -# -# NEST is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 2 of the License, or -# (at your option) any later version. -# -# NEST is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with NEST. If not, see . - -""" -Functions relating to spatial properties of nodes -""" - - -import numpy as np - -from .. import pynestkernel as kernel -from .. import nestkernel_api as nestkernel -from ._hl_api_helper import is_iterable -from ._hl_api_connections import GetConnections -from ._hl_api_parallel_computing import NumProcesses, Rank -from ._hl_api_types import NodeCollection - -try: - import matplotlib as mpl - import matplotlib.path as mpath - import matplotlib.patches as mpatches - HAVE_MPL = True -except ImportError: - HAVE_MPL = False - -__all__ = [ - 'CreateMask', - 'Displacement', - 'Distance', - 'DumpLayerConnections', - 'DumpLayerNodes', - 'FindCenterElement', - 'FindNearestElement', - 'GetPosition', - 'GetTargetNodes', - 'GetSourceNodes', - 'GetTargetPositions', - 'GetSourcePositions', - 'PlotLayer', - 'PlotProbabilityParameter', - 'PlotTargets', - 'PlotSources', - 'SelectNodesByMask', -] - - -def CreateMask(masktype, specs, anchor=None): - """ - Create a spatial mask for connections. - - Masks are used when creating connections. A mask describes the area of - the pool population that is searched for to connect for any given - node in the driver population. Several mask types are available. Examples - are the grid region, the rectangular, circular or doughnut region. - - The command :py:func:`.CreateMask` creates a `Mask` object which may be combined - with other `Mask` objects using Boolean operators. The mask is specified - in a dictionary. - - ``Mask`` objects can be passed to :py:func:`.Connect` in a connection dictionary with the key `'mask'`. - - Parameters - ---------- - masktype : str, ['rectangular' | 'circular' | 'doughnut' | 'elliptical'] - for 2D masks, ['box' | 'spherical' | 'ellipsoidal] for 3D masks, - ['grid'] only for grid-based layers in 2D. - The mask name corresponds to the geometrical shape of the mask. There - are different types for 2- and 3-dimensional layers. - specs : dict - Dictionary specifying the parameters of the provided `masktype`, - see **Mask types**. 
-    anchor : [tuple/list of floats | dict with the keys `'column'` and \
-        `'row'` (for grid masks only)], optional, default: None
-        By providing anchor coordinates, the location of the mask relative to
-        the driver node can be changed. The list of coordinates has a length
-        of 2 or 3 dependent on the number of dimensions.
-
-    Returns
-    -------
-    Mask:
-        Object representing the mask
-
-    See also
-    --------
-    Connect
-
-    Notes
-    -----
-    - All angles must be given in degrees.
-
-    **Mask types**
-
-    Available mask types (`masktype`) and their corresponding parameter
-    dictionaries:
-
-    * 2D free and grid-based layers
-      ::
-
-          'rectangular' :
-              {'lower_left'   : [float, float],
-               'upper_right'  : [float, float],
-               'azimuth_angle': float  # default: 0.0}
-          #or
-          'circular' :
-              {'radius' : float}
-          #or
-          'doughnut' :
-              {'inner_radius' : float,
-               'outer_radius' : float}
-          #or
-          'elliptical' :
-              {'major_axis' : float,
-               'minor_axis' : float,
-               'azimuth_angle' : float,   # default: 0.0,
-               'anchor' : [float, float], # default: [0.0, 0.0]}
-
-    * 3D free and grid-based layers
-      ::
-
-          'box' :
-              {'lower_left'   : [float, float, float],
-               'upper_right'  : [float, float, float],
-               'azimuth_angle': float,  # default: 0.0
-               'polar_angle'  : float}  # default: 0.0
-          #or
-          'spherical' :
-              {'radius' : float}
-          #or
-          'ellipsoidal' :
-              {'major_axis' : float,
-               'minor_axis' : float,
-               'polar_axis' : float,
-               'azimuth_angle' : float,  # default: 0.0,
-               'polar_angle' : float,    # default: 0.0,
-               'anchor' : [float, float, float]}  # default: [0.0, 0.0, 0.0]
-
-    * 2D grid-based layers only
-      ::
-
-          'grid' :
-              {'rows' : float,
-               'columns' : float}
-
-      By default the top-left corner of a grid mask, i.e., the grid
-      mask element with grid index [0, 0], is aligned with the driver
-      node. It can be changed by means of the 'anchor' parameter:
-      ::
-
-          'anchor' :
-              {'row' : float,
-               'column' : float}
-
-    **Example**
-    ::
-
-        import nest
-
-        # create a grid-based layer
-        l = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[5, 5]))
-
-        # create a circular mask
-        m = nest.CreateMask('circular', {'radius': 0.2})
-
-        # connectivity specifications
-        conndict = {'rule': 'pairwise_bernoulli',
-                    'p': 1.0,
-                    'mask': m}
-
-        # connect layer l with itself according to the specifications
-        nest.Connect(l, l, conndict)
-    """
-    if anchor is None:
-        return sli_func('CreateMask', {masktype: specs})
-    else:
-        return sli_func('CreateMask',
-                        {masktype: specs, 'anchor': anchor})
-
-
-def GetPosition(nodes):
-    """
-    Return the spatial locations of nodes.
-
-    Parameters
-    ----------
-    nodes : NodeCollection
-        `NodeCollection` of nodes we want the positions of
-
-    Returns
-    -------
-    tuple or tuple of tuple(s):
-        Tuple with a 2- or 3-element position, or tuple of such positions
-
-    See also
-    --------
-    Displacement: Get vector of lateral displacement between nodes.
-    Distance: Get lateral distance between nodes.
-    DumpLayerConnections: Write connectivity information to file.
-    DumpLayerNodes: Write node positions to file.
-
-    Notes
-    -----
-    - The functions :py:func:`.GetPosition`, :py:func:`.Displacement` and :py:func:`.Distance`
-      only work for nodes local to the current MPI process, when used in an
-      MPI-parallel simulation.
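-    - Positions are returned as plain tuples of floats, so they can be handed
-      directly to e.g. ``numpy.asarray`` for further processing (a usage sketch,
-      not a formal API guarantee).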
-
-    Example
-    -------
-    ::
-
-        import nest
-
-        # Reset kernel
-        nest.ResetKernel()
-
-        # create a NodeCollection with spatial extent
-        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[5, 5]))
-
-        # retrieve positions of all (local) nodes belonging to the population
-        pos = nest.GetPosition(s_nodes)
-
-        # retrieve positions of the first node in the NodeCollection
-        pos = nest.GetPosition(s_nodes[0])
-
-        # retrieve positions of a subset of nodes in the population
-        pos = nest.GetPosition(s_nodes[2:18])
-    """
-    if not isinstance(nodes, NodeCollection):
-        raise TypeError("nodes must be a NodeCollection with spatial extent")
-
-    return nestkernel.llapi_get_position(nodes._datum)
-
-
-def Displacement(from_arg, to_arg):
-    """
-    Get vector of lateral displacement from node(s)/position(s) `from_arg`
-    to node(s) `to_arg`.
-
-    Displacement is the shortest displacement, taking into account
-    periodic boundary conditions where applicable. If explicit positions
-    are given in the `from_arg` list, they are interpreted in the `to_arg`
-    population.
-
-    - If one of `from_arg` or `to_arg` has length 1, and the other is longer,
-      the displacement from/to the single item to all other items is given.
-    - If `from_arg` and `to_arg` both have more than one element, they have
-      to be of the same length and the displacement between each
-      pair is returned.
-
-    Parameters
-    ----------
-    from_arg : NodeCollection or tuple/list with tuple(s)/list(s) of floats
-        `NodeCollection` of node IDs or tuple/list of position(s)
-    to_arg : NodeCollection
-        `NodeCollection` of node IDs
-
-    Returns
-    -------
-    tuple:
-        Displacement vectors between pairs of nodes in `from_arg` and `to_arg`
-
-    See also
-    --------
-    Distance: Get lateral distances between nodes.
-    DumpLayerConnections: Write connectivity information to file.
-    GetPosition: Return the spatial locations of nodes.
-
-    Notes
-    -----
-    - The functions :py:func:`.GetPosition`, :py:func:`.Displacement` and :py:func:`.Distance`
-      only work for nodes local to the current MPI process, when used in an
-      MPI-parallel simulation.
-
-    **Example**
-    ::
-
-        import nest
-
-        # create a spatial population
-        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[5, 5]))
-
-        # displacement between node 2 and 3
-        print(nest.Displacement(s_nodes[1], s_nodes[2]))
-
-        # displacement between the position (0.0, 0.0) and node 2
-        print(nest.Displacement([(0.0, 0.0)], s_nodes[1]))
-    """
-    if not isinstance(to_arg, NodeCollection):
-        raise TypeError("to_arg must be a NodeCollection")
-
-    if isinstance(from_arg, np.ndarray):
-        from_arg = (from_arg, )
-
-    if (len(from_arg) > 1 and len(to_arg) > 1 and not
-            len(from_arg) == len(to_arg)):
-        raise ValueError("to_arg and from_arg must have same size unless one has size 1.")
-
-    return sli_func('Displacement', from_arg, to_arg)
-
-
-def Distance(from_arg, to_arg):
-    """
-    Get lateral distances from node(s)/position(s) `from_arg` to node(s) `to_arg`.
-
-    The distance between two nodes is the length of their displacement.
-
-    If explicit positions are given in the `from_arg` list, they are
-    interpreted in the `to_arg` population. Distance is the shortest distance,
-    taking into account periodic boundary conditions where applicable.
-
-    - If one of `from_arg` or `to_arg` has length 1, and the other is longer,
-      the distance from/to the single item to all other items is given.
-    - If `from_arg` and `to_arg` both have more than one element, they have
-      to be of the same length and the distance for each pair is
-      returned.
-
-    Parameters
-    ----------
-    from_arg : NodeCollection or tuple/list with tuple(s)/list(s) of floats
-        `NodeCollection` of node IDs or tuple/list of position(s)
-    to_arg : NodeCollection
-        `NodeCollection` of node IDs
-
-    Returns
-    -------
-    tuple:
-        Distances between `from_arg` and `to_arg`
-
-    See also
-    --------
-    Displacement: Get vector of lateral displacements between nodes.
-    DumpLayerConnections: Write connectivity information to file.
-    GetPosition: Return the spatial locations of nodes.
-
-    Notes
-    -----
-    - The functions :py:func:`.GetPosition`, :py:func:`.Displacement` and :py:func:`.Distance`
-      only work for nodes local to the current MPI process, when used in an
-      MPI-parallel simulation.
-
-    Example
-    -------
-    ::
-
-        import nest
-
-        # create a spatial population
-        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[5, 5]))
-
-        # distance between node 2 and 3
-        print(nest.Distance(s_nodes[1], s_nodes[2]))
-
-        # distance between the position (0.0, 0.0) and node 2
-        print(nest.Distance([(0.0, 0.0)], s_nodes[1]))
-    """
-    if not isinstance(to_arg, NodeCollection):
-        raise TypeError("to_arg must be a NodeCollection")
-
-    if isinstance(from_arg, np.ndarray):
-        from_arg = (from_arg, )
-
-    if (len(from_arg) > 1 and len(to_arg) > 1 and not
-            len(from_arg) == len(to_arg)):
-        raise ValueError("to_arg and from_arg must have same size unless one has size 1.")
-
-    return sli_func('Distance', from_arg, to_arg)
-
-
-def FindNearestElement(layer, locations, find_all=False):
-    """
-    Return the node(s) closest to the `locations` in the given `layer`.
-
-    This function works for fixed grid layers only.
-
-    * If `locations` is a single 2-element array giving a grid location, return a
-      `NodeCollection` of `layer` elements at the given location.
-    * If `locations` is a list of coordinates, the function returns a list of `NodeCollection` of the nodes at all
-      locations.
-
-    Parameters
-    ----------
-    layer : NodeCollection
-        `NodeCollection` of spatially distributed node IDs
-    locations : tuple(s)/list(s) of tuple(s)/list(s)
-        2-element list with coordinates of a single position, or list of
-        2-element lists of positions
-    find_all : bool, default: False
-        If there are several nodes with same minimal distance, return only the
-        first found, if `False`.
-        If `True`, instead of returning a single `NodeCollection`, return a list of `NodeCollection`
-        containing all nodes with minimal distance.
-
-    Returns
-    -------
-    NodeCollection:
-        `NodeCollection` of node IDs if locations is a 2-element list with coordinates of a single position
-    list:
-        list of `NodeCollection` if find_all is True or locations contains more than one position
-
-    See also
-    --------
-    FindCenterElement: Return NodeCollection of node closest to center of layers.
-    GetPosition: Return the spatial locations of nodes.
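-
-    Notes
-    -----
-    - Each queried location is compared against every node in `layer` (via the
-      :py:func:`.Distance` call in the implementation), so a single lookup is
-      linear in the layer size.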
- - Example - ------- - :: - - import nest - - # create a spatial population - s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[5, 5])) - - # get node ID of element closest to some location - nest.FindNearestElement(s_nodes, [3.0, 4.0], True) - """ - - if not isinstance(layer, NodeCollection): - raise TypeError("layer must be a NodeCollection") - - if not len(layer) > 0: - raise ValueError("layer cannot be empty") - - if not is_iterable(locations): - raise TypeError("locations must be coordinate array or list of coordinate arrays") - - # Ensure locations is sequence, keeps code below simpler - if not is_iterable(locations[0]): - locations = (locations, ) - - result = [] - - for loc in locations: - d = Distance(np.array(loc), layer) - - if not find_all: - dx = np.argmin(d) # finds location of one minimum - result.append(layer[dx]) - else: - minnode = list(layer[:1]) - minval = d[0] - for idx in range(1, len(layer)): - if d[idx] < minval: - minnode = [layer[idx]] - minval = d[idx] - elif np.abs(d[idx] - minval) <= 1e-14 * minval: - minnode.append(layer[idx]) - result.append(minnode) - - if len(result) == 1: - result = result[0] - - return result - - -def _rank_specific_filename(basename): - """Returns file name decorated with rank.""" - - if NumProcesses() == 1: - return basename - else: - np = NumProcesses() - np_digs = len(str(np - 1)) # for pretty formatting - rk = Rank() - dot = basename.find('.') - if dot < 0: - return '%s-%0*d' % (basename, np_digs, rk) - else: - return '%s-%0*d%s' % (basename[:dot], np_digs, rk, basename[dot:]) - - -def DumpLayerNodes(layer, outname): - """ - Write `node ID` and position data of `layer` to file. - - Write `node ID` and position data to `outname` file. For each node in `layer`, - a line with the following information is written: - :: - - node ID x-position y-position [z-position] - - If `layer` contains several `node IDs`, data for all nodes in `layer` will be written to a - single file. - - Parameters - ---------- - layer : NodeCollection - `NodeCollection` of spatially distributed node IDs - outname : str - Name of file to write to (existing files are overwritten) - - See also - -------- - DumpLayerConnections: Write connectivity information to file. - GetPosition: Return the spatial locations of nodes. - - Notes - ----- - * If calling this function from a distributed simulation, this function - will write to one file per MPI rank. - * File names are formed by adding the MPI Rank into the file name before - the file name suffix. - * Each file stores data for nodes local to that file. - - Example - ------- - :: - - import nest - - # create a spatial population - s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[5, 5])) - - # write layer node positions to file - nest.DumpLayerNodes(s_nodes, 'positions.txt') - - """ - if not isinstance(layer, NodeCollection): - raise TypeError("layer must be a NodeCollection") - - sli_func(""" - (w) file exch DumpLayerNodes close - """, - layer, _rank_specific_filename(outname)) - - -def DumpLayerConnections(source_layer, target_layer, synapse_model, outname): - """ - Write connectivity information to file. - - This function writes connection information to file for all outgoing - connections from the given layers with the given synapse model. - - For each connection, one line is stored, in the following format: - :: - - source_node_id target_node_id weight delay dx dy [dz] - - where (dx, dy [, dz]) is the displacement from source to target node. 
-    If targets do not have positions (e.g., spike recorders outside any layer),
-    NaN is written for each displacement coordinate.
-
-    Parameters
-    ----------
-    source_layer : NodeCollection
-        `NodeCollection` of spatially distributed node IDs
-    target_layer : NodeCollection
-        `NodeCollection` of (spatially distributed) node IDs
-    synapse_model : str
-        NEST synapse model
-    outname : str
-        Name of file to write to (will be overwritten if it exists)
-
-    See also
-    --------
-    DumpLayerNodes: Write layer node positions to file.
-    GetPosition: Return the spatial locations of nodes.
-    GetConnections: Return connection identifiers between
-        sources and targets
-
-    Notes
-    -----
-    * If calling this function from a distributed simulation, this function
-      will write to one file per MPI rank.
-    * File names are formed by inserting
-      the MPI rank into the file name before the file name suffix.
-    * Each file stores data for local nodes.
-
-    **Example**
-    ::
-
-        import nest
-
-        # create a spatial population
-        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[5, 5]))
-
-        nest.Connect(s_nodes, s_nodes,
-                     {'rule': 'pairwise_bernoulli', 'p': 1.0},
-                     {'synapse_model': 'static_synapse'})
-
-        # write connectivity information to file
-        nest.DumpLayerConnections(s_nodes, s_nodes, 'static_synapse', 'conns.txt')
-    """
-    if not isinstance(source_layer, NodeCollection):
-        raise TypeError("source_layer must be a NodeCollection")
-    if not isinstance(target_layer, NodeCollection):
-        raise TypeError("target_layer must be a NodeCollection")
-
-    sli_func("""
-             /oname  Set
-             cvlit /synmod Set
-             /lyr_target Set
-             /lyr_source Set
-             oname (w) file lyr_source lyr_target synmod
-             DumpLayerConnections close
-             """,
-             source_layer, target_layer, synapse_model,
-             _rank_specific_filename(outname))
-
-
-def FindCenterElement(layer):
-    """
-    Return `NodeCollection` of node closest to center of `layer`.
-
-    Parameters
-    ----------
-    layer : NodeCollection
-        `NodeCollection` with spatially distributed node IDs
-
-    Returns
-    -------
-    NodeCollection:
-        `NodeCollection` of the node closest to the center of the `layer`, as specified by `layer`
-        parameters given in ``layer.spatial``. If several nodes are equally close to the center,
-        an arbitrary one of them is returned.
-
-    See also
-    --------
-    FindNearestElement: Return the node(s) closest to the location(s) in the given `layer`.
-    GetPosition: Return the spatial locations of nodes.
-
-    Example
-    -------
-    ::
-
-        import nest
-
-        # create a spatial population
-        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[5, 5]))
-
-        # get NodeCollection of the element closest to the center of the layer
-        nest.FindCenterElement(s_nodes)
-    """
-
-    if not isinstance(layer, NodeCollection):
-        raise TypeError("layer must be a NodeCollection")
-    nearest_to_center = FindNearestElement(layer, layer.spatial['center'])[0]
-    index = layer.index(nearest_to_center.get('global_id'))
-    return layer[index:index+1]
-
-
-def GetTargetNodes(sources, tgt_layer, syn_model=None):
-    """
-    Obtain targets of `sources` in given `tgt_layer` population.
-
-    For each neuron in `sources`, this function finds all target elements
-    in `tgt_layer`. If `syn_model` is not given (default), all targets are
-    returned, otherwise only targets connected via the given synapse model.
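-
-    Each returned `NodeCollection` holds the unique targets of the corresponding
-    source node (duplicate connections are collapsed via ``numpy.unique``), in
-    the same order as `sources`.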
-
-    Parameters
-    ----------
-    sources : NodeCollection
-        NodeCollection with node IDs of `sources`
-    tgt_layer : NodeCollection
-        NodeCollection with node IDs of `tgt_layer`
-    syn_model : [None | str], optional, default: None
-        Return only targets connected via the given synapse model.
-
-    Returns
-    -------
-    tuple of NodeCollection:
-        Tuple of `NodeCollections` of target neurons fulfilling the given criteria, one `NodeCollection` per
-        source node ID in `sources`.
-
-    See also
-    --------
-    GetTargetPositions: Obtain positions of targets in a given target layer connected to given source.
-    GetConnections: Return connection identifiers between
-        sources and targets
-
-    Notes
-    -----
-    * For distributed simulations, this function only returns targets on the
-      local MPI process.
-
-    Example
-    -------
-    ::
-
-        import nest
-
-        # create a spatial population
-        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[11, 11], extent=[11., 11.]))
-
-        # connectivity specifications with a mask
-        conndict = {'rule': 'pairwise_bernoulli', 'p': 1.,
-                    'mask': {'rectangular': {'lower_left' : [-2.0, -1.0],
-                                             'upper_right': [2.0, 1.0]}}}
-
-        # connect population s_nodes with itself according to the given
-        # specifications
-        nest.Connect(s_nodes, s_nodes, conndict)
-
-        # get the node IDs of the targets of a source neuron
-        nest.GetTargetNodes(s_nodes[4], s_nodes)
-    """
-    if not isinstance(sources, NodeCollection):
-        raise TypeError("sources must be a NodeCollection.")
-
-    if not isinstance(tgt_layer, NodeCollection):
-        raise TypeError("tgt_layer must be a NodeCollection")
-
-    conns = GetConnections(sources, tgt_layer, synapse_model=syn_model)
-
-    # Re-organize conns into one list per source, containing only target node IDs.
-    src_tgt_map = dict((snode_id, []) for snode_id in sources.tolist())
-    for src, tgt in zip(conns.sources(), conns.targets()):
-        src_tgt_map[src].append(tgt)
-
-    for src in src_tgt_map.keys():
-        src_tgt_map[src] = NodeCollection(list(np.unique(src_tgt_map[src])))
-
-    # convert dict to nested list in same order as sources
-    return tuple(src_tgt_map[snode_id] for snode_id in sources.tolist())
-
-
-def GetSourceNodes(src_layer, targets, syn_model=None):
-    """
-    Obtain sources of `targets` in given `src_layer` population.
-
-    For each neuron in `targets`, this function finds all source elements
-    in `src_layer`. If `syn_model` is not given (default), all sources are
-    returned, otherwise only sources connected via the given synapse model.
-
-    Parameters
-    ----------
-    src_layer : NodeCollection
-        NodeCollection with node IDs of `src_layer`
-    targets : NodeCollection
-        NodeCollection with node IDs of `targets`
-    syn_model : [None | str], optional, default: None
-        Return only sources connected via the given synapse model.
-
-    Returns
-    -------
-    tuple of NodeCollection:
-        Tuple of `NodeCollections` of source neurons fulfilling the given criteria, one `NodeCollection` per
-        target node ID in `targets`.
-
-    See also
-    --------
-    GetSourcePositions: Obtain positions of sources in a given source layer connected to given target.
-    GetConnections: Return connection identifiers between sources and targets.
-
-    Notes
-    -----
-    * For distributed simulations, this function only returns sources on the
-      local MPI process.
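-    * This function mirrors :py:func:`.GetTargetNodes` with the roles of
-      sources and targets exchanged. A quick way to count incoming
-      connections per target is (a sketch, names assumed)::
-
-          srcs = nest.GetSourceNodes(src_layer, targets)
-          in_degrees = [len(s) for s in srcs]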
-
-    Example
-    -------
-    ::
-
-        import nest
-
-        # create a spatial population
-        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[11, 11], extent=[11., 11.]))
-
-        # connectivity specifications with a mask
-        conndict = {'rule': 'pairwise_bernoulli', 'p': 1.,
-                    'mask': {'rectangular': {'lower_left': [-2.0, -1.0],
-                                             'upper_right': [2.0, 1.0]}}}
-
-        # connect population s_nodes with itself according to the given
-        # specifications
-        nest.Connect(s_nodes, s_nodes, conndict)
-
-        # get the node IDs of the sources of a target neuron
-        nest.GetSourceNodes(s_nodes, s_nodes[4])
-    """
-    if not isinstance(src_layer, NodeCollection):
-        raise TypeError("src_layer must be a NodeCollection")
-
-    if not isinstance(targets, NodeCollection):
-        raise TypeError("targets must be a NodeCollection.")
-
-    conns = GetConnections(src_layer, targets, synapse_model=syn_model)
-
-    # Re-organize conns into one list per target, containing only source node IDs.
-    tgt_src_map = dict((tnode_id, []) for tnode_id in targets.tolist())
-    for src, tgt in zip(conns.sources(), conns.targets()):
-        tgt_src_map[tgt].append(src)
-
-    for tgt in tgt_src_map.keys():
-        tgt_src_map[tgt] = NodeCollection(list(np.unique(tgt_src_map[tgt])))
-
-    # convert dict to nested list in same order as targets
-    return tuple(tgt_src_map[tnode_id] for tnode_id in targets.tolist())
-
-
-def GetTargetPositions(sources, tgt_layer, syn_model=None):
-    """
-    Obtain positions of targets to a given `NodeCollection` of `sources`.
-
-    For each neuron in `sources`, this function finds all target elements
-    in `tgt_layer`. If `syn_model` is not given (default), all targets are
-    returned, otherwise only targets connected via the given synapse model.
-
-    Parameters
-    ----------
-    sources : NodeCollection
-        `NodeCollection` with node ID(s) of source neurons
-    tgt_layer : NodeCollection
-        `NodeCollection` of tgt_layer
-    syn_model : [None | str], optional, default: None
-        Return only target positions for a given synapse model.
-
-    Returns
-    -------
-    list of list(s) of tuple(s) of floats:
-        Positions of target neurons fulfilling the given criteria as a nested
-        list, containing one list of positions per node in sources.
-
-    See also
-    --------
-    GetTargetNodes: Obtain targets of a `NodeCollection` of sources in a given target
-        population.
-
-    Notes
-    -----
-    * For distributed simulations, this function only returns targets on the
-      local MPI process.
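-    * The returned nested list can be processed with standard tools; for
-      example, distances of the targets of one source from the origin could
-      be computed with NumPy (a sketch; ``src`` and ``tgt_layer`` are assumed
-      to exist)::
-
-          import numpy as np
-
-          positions = nest.GetTargetPositions(src, tgt_layer)[0]
-          dists = np.linalg.norm(np.array(positions), axis=1)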
-
-    Example
-    -------
-    ::
-
-        import nest
-
-        # create a spatial population
-        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[11, 11], extent=[11., 11.]))
-
-        # connectivity specifications with a mask
-        conndict = {'rule': 'pairwise_bernoulli', 'p': 1.,
-                    'mask': {'rectangular': {'lower_left' : [-2.0, -1.0],
-                                             'upper_right': [2.0, 1.0]}}}
-
-        # connect population s_nodes with itself according to the given
-        # specifications
-        nest.Connect(s_nodes, s_nodes, conndict)
-
-        # get the positions of the targets of a source neuron
-        nest.GetTargetPositions(s_nodes[5], s_nodes)
-    """
-    if not isinstance(sources, NodeCollection):
-        raise TypeError("sources must be a NodeCollection.")
-
-    # Find positions to all nodes in target layer
-    pos_all_tgts = GetPosition(tgt_layer)
-    first_tgt_node_id = tgt_layer[0].get('global_id')
-
-    connections = GetConnections(sources, tgt_layer,
-                                 synapse_model=syn_model)
-    srcs = connections.get('source')
-    tgts = connections.get('target')
-    if isinstance(srcs, int):
-        srcs = [srcs]
-    if isinstance(tgts, int):
-        tgts = [tgts]
-
-    # Make a dictionary where the keys are the source node IDs, each mapped to a
-    # list with the positions of the targets connected to that source.
-    src_tgt_pos_map = dict((snode_id, []) for snode_id in sources.tolist())
-
-    for i in range(len(connections)):
-        tgt_indx = tgts[i] - first_tgt_node_id
-        src_tgt_pos_map[srcs[i]].append(pos_all_tgts[tgt_indx])
-
-    # Turn dict into list in same order as sources
-    return [src_tgt_pos_map[snode_id] for snode_id in sources.tolist()]
-
-
-def GetSourcePositions(src_layer, targets, syn_model=None):
-    """
-    Obtain positions of sources to a given `NodeCollection` of `targets`.
-
-    For each neuron in `targets`, this function finds all source elements
-    in `src_layer`. If `syn_model` is not given (default), all sources are
-    returned, otherwise only sources connected via the given synapse model.
-
-    Parameters
-    ----------
-    src_layer : NodeCollection
-        `NodeCollection` of src_layer
-    targets : NodeCollection
-        `NodeCollection` with node ID(s) of target neurons
-    syn_model : [None | str], optional, default: None
-        Return only source positions for a given synapse model.
-
-    Returns
-    -------
-    list of list(s) of tuple(s) of floats:
-        Positions of source neurons fulfilling the given criteria as a nested
-        list, containing one list of positions per node in targets.
-
-    See also
-    --------
-    GetSourceNodes: Obtain sources of a `NodeCollection` of targets in a given source
-        population.
-
-    Notes
-    -----
-    * For distributed simulations, this function only returns sources on the
-      local MPI process.
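-    * Targets without any incoming connection from `src_layer` are represented
-      by an empty position list, so unconnected targets can be counted like
-      this (a sketch)::
-
-          pos = nest.GetSourcePositions(src_layer, targets)
-          n_unconnected = sum(1 for p in pos if len(p) == 0)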
-
-    Example
-    -------
-    ::
-
-        import nest
-
-        # create a spatial population
-        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[11, 11], extent=[11., 11.]))
-
-        # connectivity specifications with a mask
-        conndict = {'rule': 'pairwise_bernoulli', 'p': 1.,
-                    'mask': {'rectangular': {'lower_left': [-2.0, -1.0],
-                                             'upper_right': [2.0, 1.0]}}}
-
-        # connect population s_nodes with itself according to the given
-        # specifications
-        nest.Connect(s_nodes, s_nodes, conndict)
-
-        # get the positions of the sources of a target neuron
-        nest.GetSourcePositions(s_nodes, s_nodes[5])
-    """
-    if not isinstance(targets, NodeCollection):
-        raise TypeError("targets must be a NodeCollection.")
-
-    # Find positions to all nodes in source layer
-    pos_all_srcs = GetPosition(src_layer)
-    first_src_node_id = src_layer[0].get('global_id')
-
-    connections = GetConnections(src_layer, targets,
-                                 synapse_model=syn_model)
-    srcs = connections.get('source')
-    tgts = connections.get('target')
-    if isinstance(srcs, int):
-        srcs = [srcs]
-    if isinstance(tgts, int):
-        tgts = [tgts]
-
-    # Make a dictionary where the keys are the target node IDs, each mapped to a
-    # list with the positions of the sources connected to that target.
-    tgt_src_pos_map = dict((tnode_id, []) for tnode_id in targets.tolist())
-    for i in range(len(connections)):
-        src_indx = srcs[i] - first_src_node_id
-        tgt_src_pos_map[tgts[i]].append(pos_all_srcs[src_indx])
-
-    # Turn dict into list in same order as targets
-    return [tgt_src_pos_map[tnode_id] for tnode_id in targets.tolist()]
-
-
-def SelectNodesByMask(layer, anchor, mask_obj):
-    """
-    Obtain the node IDs inside a masked area of a spatially distributed population.
-
-    The function finds and returns all the node IDs inside a given mask of a
-    `layer`. The node IDs are returned as a `NodeCollection`. The function works on both 2-dimensional and
-    3-dimensional masks and layers. All mask types are allowed, including combined masks.
-
-    Parameters
-    ----------
-    layer : NodeCollection
-        `NodeCollection` with node IDs of the `layer` to select nodes from.
-    anchor : tuple/list of double
-        Position at which the mask is anchored. The search for nodes inside
-        the mask starts from this point.
-    mask_obj: object
-        `Mask` object specifying chosen area.
-
-    Returns
-    -------
-    NodeCollection:
-        `NodeCollection` of nodes/elements inside the mask.
-    """
-
-    if not isinstance(layer, NodeCollection):
-        raise TypeError("layer must be a NodeCollection.")
-
-    mask_datum = mask_obj._datum
-
-    node_id_list = sli_func('SelectNodesByMask',
-                            layer, anchor, mask_datum)
-
-    # When creating a NodeCollection, the input list of node IDs must be sorted.
-    return NodeCollection(sorted(node_id_list))
-
-
-def _draw_extent(ax, xctr, yctr, xext, yext):
-    """Draw extent and set aspect ratio and limits"""
-
-    # import pyplot here and not at toplevel to avoid preventing users
-    # from changing matplotlib backend after importing nest
-    import matplotlib.pyplot as plt
-
-    # thin gray line indicating extent
-    llx, lly = xctr - xext / 2.0, yctr - yext / 2.0
-    urx, ury = llx + xext, lly + yext
-    ax.add_patch(
-        plt.Rectangle((llx, lly), xext, yext, fc='none', ec='0.5', lw=1,
-                      zorder=1))
-
-    # set limits slightly outside extent
-    ax.set(aspect='equal',
-           xlim=(llx - 0.05 * xext, urx + 0.05 * xext),
-           ylim=(lly - 0.05 * yext, ury + 0.05 * yext),
-           xticks=tuple(), yticks=tuple())
-
-
-def _shifted_positions(pos, ext):
-    """Get shifted positions corresponding to boundary conditions."""
-    return [[pos[0] + ext[0], pos[1]],
-            [pos[0] - ext[0], pos[1]],
-            [pos[0], pos[1] + ext[1]],
-            [pos[0], pos[1] - ext[1]],
-            [pos[0] + ext[0], pos[1] - ext[1]],
-            [pos[0] - ext[0], pos[1] + ext[1]],
-            [pos[0] + ext[0], pos[1] + ext[1]],
-            [pos[0] - ext[0], pos[1] - ext[1]]]
-
-
-def PlotLayer(layer, fig=None, nodecolor='b', nodesize=20):
-    """
-    Plot all nodes in a `layer`.
-
-    Parameters
-    ----------
-    layer : NodeCollection
-        `NodeCollection` of spatially distributed nodes
-    fig : [None | matplotlib.figure.Figure object], optional, default: None
-        Matplotlib figure to plot to. If not given, a new figure is
-        created.
-    nodecolor : [None | any matplotlib color], optional, default: 'b'
-        Color for nodes
-    nodesize : float, optional, default: 20
-        Marker size for nodes
-
-    Returns
-    -------
-    `matplotlib.figure.Figure` object
-
-    See also
-    --------
-    PlotProbabilityParameter: Create a plot of the connection probability and/or mask.
-    PlotTargets: Plot all targets of a given source.
-    matplotlib.figure.Figure : matplotlib Figure class
-
-    Notes
-    -----
-    * Do **not** use this function in distributed simulations.
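-    * The returned figure can be post-processed with standard Matplotlib
-      calls, for example (a sketch; the file name is an arbitrary choice)::
-
-          fig = nest.PlotLayer(s_nodes)
-          fig.gca().set_title('Node positions')
-          fig.savefig('layer.png')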
-
-
-    Example
-    -------
-    ::
-
-        import nest
-        import matplotlib.pyplot as plt
-
-        # create a spatial population
-        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[11, 11], extent=[11., 11.]))
-
-        # plot layer with all its nodes
-        nest.PlotLayer(s_nodes)
-        plt.show()
-    """
-
-    if not HAVE_MPL:
-        raise ImportError('Matplotlib could not be imported')
-
-    # import pyplot here and not at toplevel to avoid preventing users
-    # from changing matplotlib backend after importing nest
-    import matplotlib.pyplot as plt
-
-    if not isinstance(layer, NodeCollection):
-        raise TypeError('layer must be a NodeCollection.')
-
-    # get layer extent
-    ext = layer.spatial['extent']
-
-    if len(ext) == 2:
-        # 2D layer
-
-        # get layer extent and center, x and y
-        xext, yext = ext
-        xctr, yctr = layer.spatial['center']
-
-        # extract position information, transpose to list of x and y pos
-        if len(layer) == 1:
-            # handle case of single node
-            xpos, ypos = GetPosition(layer)
-        else:
-            xpos, ypos = zip(*GetPosition(layer))
-
-        if fig is None:
-            fig = plt.figure()
-            ax = fig.add_subplot(111)
-        else:
-            ax = fig.gca()
-
-        ax.scatter(xpos, ypos, s=nodesize, facecolor=nodecolor)
-        _draw_extent(ax, xctr, yctr, xext, yext)
-
-    elif len(ext) == 3:
-        # 3D layer
-        from mpl_toolkits.mplot3d import Axes3D  # noqa: F401  (import registers the 3D projection)
-
-        # extract position information, transpose to list of x,y,z pos
-        if len(layer) == 1:
-            # handle case of single node
-            pos = GetPosition(layer)
-        else:
-            pos = zip(*GetPosition(layer))
-
-        if fig is None:
-            fig = plt.figure()
-            ax = fig.add_subplot(111, projection='3d')
-        else:
-            ax = fig.gca()
-
-        ax.scatter(*pos, s=nodesize, c=nodecolor)
-        plt.draw_if_interactive()
-
-    else:
-        raise ValueError("unexpected dimension of layer")
-
-    return fig
-
-
-def PlotTargets(src_nrn, tgt_layer, syn_type=None, fig=None,
-                mask=None, probability_parameter=None,
-                src_color='red', src_size=50, tgt_color='blue', tgt_size=20,
-                mask_color='yellow', probability_cmap='Greens'):
-    """
-    Plot all targets of source neuron `src_nrn` in a target layer `tgt_layer`.
-
-    Parameters
-    ----------
-    src_nrn : NodeCollection
-        `NodeCollection` of source neuron (as single-element NodeCollection)
-    tgt_layer : NodeCollection
-        `NodeCollection` of tgt_layer
-    syn_type : [None | str], optional, default: None
-        Show only targets connected with a given synapse type
-    fig : [None | matplotlib.figure.Figure object], optional, default: None
-        Matplotlib figure to plot to. If not given, a new figure is created.
-    mask : [None | dict], optional, default: None
-        Draw mask with targets; see :py:func:`.PlotProbabilityParameter` for details.
-    probability_parameter : [None | Parameter], optional, default: None
-        Draw connection probability with targets; see :py:func:`.PlotProbabilityParameter` for details.
-    src_color : [None | any matplotlib color], optional, default: 'red'
-        Color used to mark source node position
-    src_size : float, optional, default: 50
-        Size of source marker (see scatter for details)
-    tgt_color : [None | any matplotlib color], optional, default: 'blue'
-        Color used to mark target node positions
-    tgt_size : float, optional, default: 20
-        Size of target markers (see scatter for details)
-    mask_color : [None | any matplotlib color], optional, default: 'yellow'
-        Color used for line marking mask
-    probability_cmap : [None | any matplotlib cmap color], optional, default: 'Greens'
-        Colormap used for lines marking probability parameter.
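-
-    The ``mask`` argument accepts the same dictionaries as connection
-    specifications, for instance (an illustrative value, not a default)::
-
-        mask = {'rectangular': {'lower_left': [-2.0, -1.0],
-                                'upper_right': [2.0, 1.0]}}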
-
-    Returns
-    -------
-    matplotlib.figure.Figure object
-
-    See also
-    --------
-    PlotSources: Plot all sources of target neuron in a source layer.
-    GetTargetNodes: Obtain targets of sources in a given target layer.
-    GetTargetPositions: Obtain positions of targets of sources in a given target layer.
-    PlotProbabilityParameter: Add indication of connection probability and mask to axes.
-    PlotLayer: Plot all nodes in a spatially distributed population.
-    matplotlib.pyplot.scatter : matplotlib scatter plot.
-
-    Notes
-    -----
-    * Do **not** use this function in distributed simulations.
-
-    **Example**
-    ::
-
-        import nest
-        import matplotlib.pyplot as plt
-
-        # create a spatial population
-        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[11, 11], extent=[11., 11.]))
-
-        # connectivity specifications with a mask
-        conndict = {'rule': 'pairwise_bernoulli', 'p': 1.,
-                    'mask': {'rectangular': {'lower_left' : [-2.0, -1.0],
-                                             'upper_right': [2.0, 1.0]}}}
-
-        # connect population s_nodes with itself according to the given
-        # specifications
-        nest.Connect(s_nodes, s_nodes, conndict)
-
-        # plot the targets of a source neuron
-        nest.PlotTargets(s_nodes[4], s_nodes)
-        plt.show()
-    """
-
-    if not HAVE_MPL:
-        raise ImportError("Matplotlib could not be imported")
-
-    # import pyplot here and not at toplevel to avoid preventing users
-    # from changing matplotlib backend after importing nest
-    import matplotlib.pyplot as plt
-
-    if not isinstance(src_nrn, NodeCollection) or len(src_nrn) != 1:
-        raise TypeError("src_nrn must be a single element NodeCollection.")
-    if not isinstance(tgt_layer, NodeCollection):
-        raise TypeError("tgt_layer must be a NodeCollection.")
-
-    # get position of source
-    srcpos = GetPosition(src_nrn)
-
-    # get layer extent
-    ext = tgt_layer.spatial['extent']
-
-    if len(ext) == 2:
-        # 2D layer
-
-        # get layer extent and center, x and y
-        xext, yext = ext
-        xctr, yctr = tgt_layer.spatial['center']
-
-        if fig is None:
-            fig = plt.figure()
-            ax = fig.add_subplot(111)
-        else:
-            ax = fig.gca()
-
-        # get positions, reorganize to x and y vectors
-        tgtpos = GetTargetPositions(src_nrn, tgt_layer, syn_type)
-        if tgtpos:
-            xpos, ypos = zip(*tgtpos[0])
-            ax.scatter(xpos, ypos, s=tgt_size, facecolor=tgt_color)
-
-        ax.scatter(srcpos[:1], srcpos[1:], s=src_size, facecolor=src_color, alpha=0.4, zorder=-10)
-
-        if mask is not None or probability_parameter is not None:
-            edges = [xctr - xext, xctr + xext, yctr - yext, yctr + yext]
-            PlotProbabilityParameter(src_nrn, probability_parameter, mask=mask, edges=edges, ax=ax,
-                                     prob_cmap=probability_cmap, mask_color=mask_color)
-
-        _draw_extent(ax, xctr, yctr, xext, yext)
-
-    else:
-        # 3D layer
-        from mpl_toolkits.mplot3d import Axes3D  # noqa: F401  (import registers the 3D projection)
-
-        if fig is None:
-            fig = plt.figure()
-            ax = fig.add_subplot(111, projection='3d')
-        else:
-            ax = fig.gca()
-
-        # get positions, reorganize to x,y,z vectors
-        tgtpos = GetTargetPositions(src_nrn, tgt_layer, syn_type)
-        if tgtpos:
-            xpos, ypos, zpos = zip(*tgtpos[0])
-            ax.scatter3D(xpos, ypos, zpos, s=tgt_size, facecolor=tgt_color)
-
-        ax.scatter3D(srcpos[:1], srcpos[1:2], srcpos[2:], s=src_size, facecolor=src_color, alpha=0.4, zorder=-10)
-
-        plt.draw_if_interactive()
-
-    return fig
-
-
-def PlotSources(src_layer, tgt_nrn, syn_type=None, fig=None,
-                mask=None, probability_parameter=None,
-                tgt_color='red', tgt_size=50, src_color='blue', src_size=20,
-                mask_color='yellow', probability_cmap='Greens'):
-    """
-    Plot all sources of target neuron `tgt_nrn` in a source layer `src_layer`.
-
-    Parameters
-    ----------
-    src_layer : NodeCollection
-        `NodeCollection` of src_layer
-    tgt_nrn : NodeCollection
-        `NodeCollection` of target neuron (as single-element NodeCollection)
-    syn_type : [None | str], optional, default: None
-        Show only sources connected with a given synapse type
-    fig : [None | matplotlib.figure.Figure object], optional, default: None
-        Matplotlib figure to plot to. If not given, a new figure is created.
-    mask : [None | dict], optional, default: None
-        Draw mask with sources; see :py:func:`.PlotProbabilityParameter` for details.
-    probability_parameter : [None | Parameter], optional, default: None
-        Draw connection probability with sources; see :py:func:`.PlotProbabilityParameter` for details.
-    tgt_color : [None | any matplotlib color], optional, default: 'red'
-        Color used to mark target node position
-    tgt_size : float, optional, default: 50
-        Size of target marker (see scatter for details)
-    src_color : [None | any matplotlib color], optional, default: 'blue'
-        Color used to mark source node positions
-    src_size : float, optional, default: 20
-        Size of source markers (see scatter for details)
-    mask_color : [None | any matplotlib color], optional, default: 'yellow'
-        Color used for line marking mask
-    probability_cmap : [None | any matplotlib cmap color], optional, default: 'Greens'
-        Colormap used for lines marking probability parameter.
-
-    Returns
-    -------
-    matplotlib.figure.Figure object
-
-    See also
-    --------
-    PlotTargets: Plot all targets of source neuron in a target layer.
-    GetSourceNodes: Obtain sources of a target in a given source layer.
-    GetSourcePositions: Obtain positions of sources of target in a given source layer.
-    PlotProbabilityParameter: Add indication of connection probability and mask to axes.
-    PlotLayer: Plot all nodes in a spatially distributed population.
-    matplotlib.pyplot.scatter : matplotlib scatter plot.
-
-    Notes
-    -----
-    * Do **not** use this function in distributed simulations.
-
-    **Example**
-    ::
-
-        import nest
-        import matplotlib.pyplot as plt
-
-        # create a spatial population
-        s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[11, 11], extent=[11., 11.]))
-
-        # connectivity specifications with a mask
-        conndict = {'rule': 'pairwise_bernoulli', 'p': 1.,
-                    'use_on_source': True,
-                    'mask': {'rectangular': {'lower_left': [-2.0, -1.0],
-                                             'upper_right': [2.0, 1.0]}}}
-
-        # connect population s_nodes with itself according to the given
-        # specifications
-        nest.Connect(s_nodes, s_nodes, conndict)
-
-        # plot the sources of a target neuron
-        nest.PlotSources(s_nodes, s_nodes[4])
-        plt.show()
-    """
-
-    # import pyplot here and not at toplevel to avoid preventing users
-    # from changing matplotlib backend after importing nest
-    import matplotlib.pyplot as plt
-
-    if not isinstance(tgt_nrn, NodeCollection) or len(tgt_nrn) != 1:
-        raise TypeError("tgt_nrn must be a single element NodeCollection.")
-    if not isinstance(src_layer, NodeCollection):
-        raise TypeError("src_layer must be a NodeCollection.")
-
-    # get position of target
-    tgtpos = GetPosition(tgt_nrn)
-
-    # get layer extent
-    ext = src_layer.spatial['extent']
-
-    if len(ext) == 2:
-        # 2D layer
-
-        # get layer extent and center, x and y
-        xext, yext = ext
-        xctr, yctr = src_layer.spatial['center']
-
-        if fig is None:
-            fig = plt.figure()
-            ax = fig.add_subplot(111)
-        else:
-            ax = fig.gca()
-
-        # get positions, reorganize to x and y vectors
-        srcpos = GetSourcePositions(src_layer, tgt_nrn, syn_type)
-        if srcpos:
-            xpos, ypos = zip(*srcpos[0])
-            ax.scatter(xpos, ypos, s=src_size, facecolor=src_color)
-
-        ax.scatter(tgtpos[:1], tgtpos[1:], s=tgt_size, facecolor=tgt_color, alpha=0.4, zorder=-10)
-
-        if mask is not None or probability_parameter is not None:
-            edges = [xctr - xext, xctr + xext, yctr - yext, yctr + yext]
-            PlotProbabilityParameter(tgt_nrn, probability_parameter, mask=mask, edges=edges, ax=ax,
-                                     prob_cmap=probability_cmap, mask_color=mask_color)
-
-        _draw_extent(ax, xctr, yctr, xext, yext)
-
-    else:
-        # 3D layer
-        from mpl_toolkits.mplot3d import Axes3D  # noqa: F401  (import registers the 3D projection)
-
-        if fig is None:
-            fig = plt.figure()
-            ax = fig.add_subplot(111, projection='3d')
-        else:
-            ax = fig.gca()
-
-        # get positions, reorganize to x,y,z vectors
-        srcpos = GetSourcePositions(src_layer, tgt_nrn, syn_type)
-        if srcpos:
-            xpos, ypos, zpos = zip(*srcpos[0])
-            ax.scatter3D(xpos, ypos, zpos, s=src_size, facecolor=src_color)
-
-        ax.scatter3D(tgtpos[:1], tgtpos[1:2], tgtpos[2:], s=tgt_size, facecolor=tgt_color, alpha=0.4, zorder=-10)
-
-        plt.draw_if_interactive()
-
-    return fig
-
-
-def _create_mask_patches(mask, periodic, extent, source_pos, face_color='yellow'):
-    """Create Matplotlib Patch objects representing the mask"""
-
-    # import pyplot here and not at toplevel to avoid preventing users
-    # from changing matplotlib backend after importing nest
-    import matplotlib.pyplot as plt
-    import matplotlib as mtpl
-    import matplotlib.path as mpath
-    import matplotlib.patches as mpatches
-
-    edge_color = 'black'
-    alpha = 0.2
-    line_width = 2
-    mask_patches = []
-
-    if 'anchor' in mask:
-        offs = np.array(mask['anchor'])
-    else:
-        offs = np.array([0., 0.])
-
-    if 'circular' in mask:
-        r = mask['circular']['radius']
-
-        patch = plt.Circle(source_pos + offs, radius=r,
-                           fc=face_color, ec=edge_color, alpha=alpha, lw=line_width)
-        mask_patches.append(patch)
-
-        if periodic:
-            for pos in _shifted_positions(source_pos + offs, extent):
-                patch = plt.Circle(pos, radius=r,
-                                   fc=face_color, ec=edge_color, alpha=alpha, lw=line_width)
-                mask_patches.append(patch)
-    elif 'doughnut' in mask:
-        # Mmm... doughnut
-        def make_doughnut_patch(pos, r_out, r_in, ec, fc, alpha):
-            def make_circle(r):
-                t = np.arange(0, np.pi * 2.0, 0.01)
-                t = t.reshape((len(t), 1))
-                x = r * np.cos(t)
-                y = r * np.sin(t)
-                return np.hstack((x, y))
-            outside_verts = make_circle(r_out)[::-1]
-            inside_verts = make_circle(r_in)
-            codes = np.ones(len(inside_verts), dtype=mpath.Path.code_type) * mpath.Path.LINETO
-            codes[0] = mpath.Path.MOVETO
-            vertices = np.concatenate([outside_verts, inside_verts])
-            vertices += pos
-            all_codes = np.concatenate((codes, codes))
-            path = mpath.Path(vertices, all_codes)
-            return mpatches.PathPatch(path, fc=fc, ec=ec, alpha=alpha, lw=line_width)
-
-        r_in = mask['doughnut']['inner_radius']
-        r_out = mask['doughnut']['outer_radius']
-        pos = source_pos + offs
-        # note the argument order: outer radius first, matching the signature above
-        patch = make_doughnut_patch(pos, r_out, r_in, edge_color, face_color, alpha)
-        mask_patches.append(patch)
-        if periodic:
-            for pos in _shifted_positions(source_pos + offs, extent):
-                patch = make_doughnut_patch(pos, r_out, r_in, edge_color, face_color, alpha)
-                mask_patches.append(patch)
-    elif 'rectangular' in mask:
-        ll = np.array(mask['rectangular']['lower_left'])
-        ur = np.array(mask['rectangular']['upper_right'])
-        width = ur[0] - ll[0]
-        height = ur[1] - ll[1]
-        pos = source_pos + ll + offs
-        cntr = [pos[0] + width/2, pos[1] + height/2]
-
-        if 'azimuth_angle' in mask['rectangular']:
-            angle = mask['rectangular']['azimuth_angle']
-        else:
-            angle = 0.0
-
-        patch = plt.Rectangle(pos, width, height,
-                              fc=face_color, ec=edge_color, alpha=alpha, lw=line_width)
-        # Need to rotate about center
-        trnsf = mtpl.transforms.Affine2D().rotate_deg_around(cntr[0], cntr[1], angle) + plt.gca().transData
-        patch.set_transform(trnsf)
-        mask_patches.append(patch)
-
-        if periodic:
-            for pos in _shifted_positions(source_pos + ll + offs, extent):
-                patch = plt.Rectangle(pos, width, height,
-                                      fc=face_color, ec=edge_color, alpha=alpha, lw=line_width)
-
-                cntr = [pos[0] + width/2, pos[1] + height/2]
-                # Need to rotate about center
-                trnsf = mtpl.transforms.Affine2D().rotate_deg_around(cntr[0], cntr[1], angle) + plt.gca().transData
-                patch.set_transform(trnsf)
-                mask_patches.append(patch)
-    elif 'elliptical' in mask:
-        width = mask['elliptical']['major_axis']
-        height = mask['elliptical']['minor_axis']
-        if 'azimuth_angle' in mask['elliptical']:
-            angle = mask['elliptical']['azimuth_angle']
-        else:
-            angle = 0.0
-        if 'anchor' in mask['elliptical']:
-            anchor = mask['elliptical']['anchor']
-        else:
-            anchor = np.array([0., 0.])
-        patch = mpatches.Ellipse(source_pos + offs + anchor, width, height,
-                                 angle=angle, fc=face_color,
-                                 ec=edge_color, alpha=alpha, lw=line_width)
-        mask_patches.append(patch)
-
-        if periodic:
-            for pos in _shifted_positions(source_pos + offs + anchor, extent):
-                patch = mpatches.Ellipse(pos, width, height, angle=angle, fc=face_color,
-                                         ec=edge_color, alpha=alpha, lw=line_width)
-                mask_patches.append(patch)
-    else:
-        raise ValueError('Mask type cannot be plotted with this version of PyNEST.')
-    return mask_patches
-
-
-def PlotProbabilityParameter(source, parameter=None, mask=None, edges=[-0.5, 0.5, -0.5, 0.5], shape=[100, 100],
-                             ax=None, prob_cmap='Greens', mask_color='yellow'):
-    """
-    Create a plot of the connection probability and/or mask.
-
-    A probability plot is created based on a `Parameter` and a `source`. The
-    `Parameter` should have a distance dependency. The `source` must be given
-    as a `NodeCollection` with a single node ID. Optionally a `mask` can also be
-    plotted.
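-
-    For instance (a sketch; assumes a spatial population ``s_nodes`` and uses a
-    Gaussian distance profile from ``nest.spatial_distributions``, whose exact
-    signature should be checked against the installed version)::
-
-        parameter = nest.spatial_distributions.gaussian(nest.spatial.distance, std=0.25)
-        nest.PlotProbabilityParameter(s_nodes[12:13], parameter,
-                                      mask={'circular': {'radius': 0.5}})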
-
-    Parameters
-    ----------
-    source : NodeCollection
-        Single node ID `NodeCollection` to use as source.
-    parameter : Parameter
-        `Parameter` the probability is based on.
-    mask : Dictionary
-        Optional specification of a connection mask. Connections will only
-        be made to nodes inside the mask. See :py:func:`.CreateMask` for options on
-        how to specify the mask.
-    edges : list/tuple
-        List of four edges of the region to plot. The values are given as
-        [x_min, x_max, y_min, y_max].
-    shape : list/tuple
-        Number of `Parameter` values to calculate in each direction.
-    ax : matplotlib.axes.AxesSubplot
-        A matplotlib axes instance to plot in. If none is given,
-        a new one is created.
-    prob_cmap : [None | any matplotlib cmap color], optional, default: 'Greens'
-        Colormap used for the probability values.
-    mask_color : [None | any matplotlib color], optional, default: 'yellow'
-        Color used for line marking mask.
-    """
-
-    if not HAVE_MPL:
-        raise ImportError('Matplotlib could not be imported')
-
-    # import pyplot here and not at toplevel to avoid preventing users
-    # from changing matplotlib backend after importing nest
-    import matplotlib.pyplot as plt
-
-    if parameter is None and mask is None:
-        raise ValueError('At least one of parameter or mask must be specified')
-    if ax is None:
-        fig, ax = plt.subplots()
-    ax.set_xlim(*edges[:2])
-    ax.set_ylim(*edges[2:])
-
-    if parameter is not None:
-        z = np.zeros(shape[::-1])
-        for i, x in enumerate(np.linspace(edges[0], edges[1], shape[0])):
-            positions = [[x, y] for y in np.linspace(edges[2], edges[3], shape[1])]
-            values = parameter.apply(source, positions)
-            z[:, i] = np.array(values)
-        img = ax.imshow(np.minimum(np.maximum(z, 0.0), 1.0), extent=edges,
-                        origin='lower', cmap=prob_cmap, vmin=0., vmax=1.)
-        plt.colorbar(img, ax=ax, fraction=0.046, pad=0.04)
-
-    if mask is not None:
-        periodic = source.spatial['edge_wrap']
-        extent = source.spatial['extent']
-        source_pos = GetPosition(source)
-        patches = _create_mask_patches(mask, periodic, extent, source_pos, face_color=mask_color)
-        for patch in patches:
-            patch.set_zorder(0.5)
-            ax.add_patch(patch)
diff --git a/pynest/nest/lib/_hl_api_types.py b/pynest/nest/lib/_hl_api_types.py
deleted file mode 100644
index c4c2cab999..0000000000
--- a/pynest/nest/lib/_hl_api_types.py
+++ /dev/null
@@ -1,1221 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# _hl_api_types.py
-#
-# This file is part of NEST.
-#
-# Copyright (C) 2004 The NEST Initiative
-#
-# NEST is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or
-# (at your option) any later version.
-#
-# NEST is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with NEST.  If not, see <http://www.gnu.org/licenses/>.
-
-"""
-Classes defining the different PyNEST types
-"""
-
-from .._ll_api import *
-from .. import pynestkernel as kernel
-from ..
import nestkernel_api as nestkernel -from ._hl_api_helper import ( - get_parameters, - get_parameters_hierarchical_addressing, - is_iterable, - restructure_data, -) -from ._hl_api_simulation import GetKernelStatus - - -def sli_func(*args, **kwargs): - raise RuntimeError(f'Called sli_func with\nargs: {args}\nkwargs: {kwargs}') - -import numpy -import json -from math import floor, log - -try: - import pandas - HAVE_PANDAS = True -except ImportError: - HAVE_PANDAS = False - -__all__ = [ - 'CollocatedSynapses', - 'Compartments', - 'CreateParameter', - 'Mask', - 'NodeCollection', - 'Parameter', - 'Receptors', - 'serializable', - 'SynapseCollection', - 'to_json', -] - - -def CreateParameter(parametertype, specs): - """ - Create a parameter. - - Parameters - ---------- - parametertype : string - Parameter type with or without distance dependency. - Can be one of the following: 'constant', 'linear', 'exponential', 'gaussian', 'gaussian2D', - 'uniform', 'normal', 'lognormal', 'distance', 'position' - specs : dict - Dictionary specifying the parameters of the provided - `parametertype`, see **Parameter types**. - - - Returns - ------- - ``Parameter``: - Object representing the parameter - - Notes - ----- - - Instead of using `CreateParameter` you can also use the various parametrizations embedded in NEST. See for - instance :py:func:`.uniform`. - - **Parameter types** - - Examples of available parameter types (`parametertype` parameter), with their function and - acceptable keys for their corresponding specification dictionaries: - - * Constant - :: - - 'constant' : - {'value' : float} # constant value - * Randomization - :: - - # random parameter with uniform distribution in [min,max) - 'uniform' : - {'min' : float, # minimum value, default: 0.0 - 'max' : float} # maximum value, default: 1.0 - - # random parameter with normal distribution - 'normal': - {'mean' : float, # mean value, default: 0.0 - 'std' : float} # standard deviation, default: 1.0 - - # random parameter with lognormal distribution - 'lognormal' : - {'mean' : float, # mean value of logarithm, default: 0.0 - 'std' : float} # standard deviation of log, default: 1.0 - """ - return nestkernel.llapi_create_parameter({parametertype: specs}) - - -class NodeCollectionIterator: - """ - Iterator class for `NodeCollection`. - - Returns - ------- - `NodeCollection`: - Single node ID `NodeCollection` of respective iteration. - """ - - def __init__(self, nc): - self._nc = nc - self._increment = 0 - - def __iter__(self): - return self - - def __next__(self): - if self._increment > len(self._nc) - 1: - raise StopIteration - - index = self._increment + (self._increment >= 0) - val = nestkernel.llapi_slice(self._nc._datum, index, index, 1) - self._increment += 1 - return val - - -class NodeCollection: - """ - Class for `NodeCollection`. - - `NodeCollection` represents the nodes of a network. The class supports - iteration, concatenation, indexing, slicing, membership, length, conversion to and - from lists, test for membership, and test for equality. By using the - membership functions :py:func:`get()` and :py:func:`set()`, you can get and set desired - parameters. - - A `NodeCollection` is created by the :py:func:`.Create` function, or by converting a - list of nodes to a `NodeCollection` with ``nest.NodeCollection(list)``. - - If your nodes have spatial extent, use the member parameter ``spatial`` to get the spatial information. 
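-
-    For instance (a sketch, assuming a population created with spatial
-    positions)::
-
-        layer = nest.Create('iaf_psc_alpha',
-                            positions=nest.spatial.grid(shape=[2, 2]))
-        print(layer.spatial['center'])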
-
-    Slicing a NodeCollection follows standard Python slicing syntax: nc[start:stop:step], where start and stop
-    give the zero-indexed right-open range of nodes, and step gives the step length between nodes. The step must
-    be strictly positive.
-
-    Example
-    -------
-    ::
-
-        import nest
-
-        nest.ResetKernel()
-
-        # Create NodeCollection representing nodes
-        nc = nest.Create('iaf_psc_alpha', 10)
-
-        # Convert from list
-        node_ids_in = [2, 4, 6, 8]
-        new_nc = nest.NodeCollection(node_ids_in)
-
-        # Convert to list
-        nc_list = nc.tolist()
-
-        # Concatenation
-        Enrns = nest.Create('aeif_cond_alpha', 600)
-        Inrns = nest.Create('iaf_psc_alpha', 400)
-        nrns = Enrns + Inrns
-
-        # Slicing and membership
-        print(new_nc[2])
-        print(new_nc[1:2])
-        6 in new_nc
-    """
-
-    _datum = None
-
-    def __init__(self, data=None):
-        if data is None:
-            data = []
-        if isinstance(data, nestkernel.NodeCollectionObject):
-            self._datum = data
-        else:
-            # Data from user, must be converted to datum
-            # Data can be anything that can be converted to a NodeCollection,
-            # such as list, tuple, etc.
-            nc = nestkernel.llapi_make_nodecollection(data)
-            self._datum = nc._datum
-
-    def __iter__(self):
-        return NodeCollectionIterator(self)
-
-    def __add__(self, other):
-        if not isinstance(other, NodeCollection):
-            raise NotImplementedError()
-
-        return nestkernel.llapi_join_nc(self._datum, other._datum)
-
-    def __getitem__(self, key):
-        if isinstance(key, slice):
-            if key.start is None:
-                start = 1
-            else:
-                start = key.start + 1 if key.start >= 0 else key.start
-                if abs(start) > self.__len__():
-                    raise IndexError('slice start value outside of the NodeCollection')
-            if key.stop is None:
-                stop = self.__len__()
-            else:
-                stop = key.stop if key.stop > 0 else key.stop - 1
-                if abs(stop) > self.__len__():
-                    raise IndexError('slice stop value outside of the NodeCollection')
-            step = 1 if key.step is None else key.step
-            if step < 1:
-                raise IndexError('slicing step for NodeCollection must be strictly positive')
-
-            return nestkernel.llapi_slice(self._datum, start, stop, step)
-        elif isinstance(key, (int, numpy.integer)):
-            if abs(key + (key >= 0)) > self.__len__():
-                raise IndexError('index value outside of the NodeCollection')
-            return self[key:key + 1:1]
-        elif isinstance(key, (list, tuple)):
-            if len(key) == 0:
-                return NodeCollection([])
-            # Must check if elements are bool first, because bool inherits from int
-            if all(isinstance(x, bool) for x in key):
-                if len(key) != len(self):
-                    raise IndexError('Bool index array must be the same length as NodeCollection')
-                np_key = numpy.array(key, dtype=bool)
-            # Checking that elements are not instances of bool too, because bool inherits from int
-            elif all(isinstance(x, int) and not isinstance(x, bool) for x in key):
-                np_key = numpy.array(key, dtype=numpy.uint64)
-                if len(numpy.unique(np_key)) != len(np_key):
-                    raise ValueError('All node IDs in a NodeCollection have to be unique')
-            else:
-                raise TypeError('Indices must be integers or bools')
-            return nestkernel.llapi_take_array_index(self._datum, np_key)
-        elif isinstance(key, numpy.ndarray):
-            if len(key) == 0:
-                return NodeCollection([])
-            if len(key.shape) != 1:
-                raise TypeError('NumPy indices must be one-dimensional')
-            is_booltype = numpy.issubdtype(key.dtype, numpy.dtype(bool).type)
-            if not (is_booltype or numpy.issubdtype(key.dtype, numpy.integer)):
-                raise TypeError('NumPy indices must be an array of integers or bools')
-            if is_booltype and len(key) != len(self):
-                raise IndexError('Bool index array must be the same length as NodeCollection')
-            if not is_booltype and len(numpy.unique(key)) != len(key):
-                raise ValueError('All node IDs in a NodeCollection have to be unique')
-            return nestkernel.llapi_take_array_index(self._datum, key)
-        else:
-            raise IndexError('only integers, slices, lists, tuples, and numpy arrays are valid indices')
-
-    def __contains__(self, node_id):
-        return nestkernel.llapi_nc_contains(self._datum, node_id)
-
-    def __eq__(self, other):
-        if not isinstance(other, NodeCollection):
-            raise NotImplementedError('Cannot compare NodeCollection to {}'.format(type(other).__name__))
-
-        if self.__len__() != other.__len__():
-            return False
-
-        return nestkernel.llapi_eq_nc(self._datum, other._datum)
-
-    def __ne__(self, other):
-        if not isinstance(other, NodeCollection):
-            raise NotImplementedError()
-
-        return not self == other
-
-    def __len__(self):
-        return nestkernel.llapi_nc_size(self._datum)
-
-    def __str__(self):
-        return nestkernel.llapi_to_string(self._datum)
-
-    def __repr__(self):
-        return self.__str__()
-
-    def get(self, *params, **kwargs):
-        """
-        Get parameters from nodes.
-
-        Parameters
-        ----------
-        params : str or list, optional
-            Parameters to get from the nodes. It must be one of the following:
-
-            - A single string.
-            - A list of strings.
-            - One or more strings, followed by a string or list of strings.
-              This is for hierarchical addressing.
-        output : str, ['pandas','json'], optional
-            If the returned data should be in a Pandas DataFrame or in a
-            JSON serializable format.
-
-        Returns
-        -------
-        int or float:
-            If there is a single node in the `NodeCollection`, and a single
-            parameter in params.
-        array_like:
-            If there are multiple nodes in the `NodeCollection`, and a single
-            parameter in params.
-        dict:
-            If there are multiple parameters in params. Or, if no parameters
-            are specified, a dictionary containing aggregated parameter-values
-            for all nodes is returned.
-        DataFrame:
-            Pandas Data frame if output should be in pandas format.
-
-        Raises
-        ------
-        TypeError
-            If the input params are of the wrong form.
-        KeyError
-            If the specified parameter does not exist for the nodes.
-
-        See Also
-        --------
-        :py:func:`set`
-
-        Examples
-        --------
-
-        >>> nodes.get()
-        {'archiver_length': (0, 0, 0),
-         'beta_Ca': (0.001, 0.001, 0.001),
-         'C_m': (250.0, 250.0, 250.0),
-         ...
-         'V_th': (-55.0, -55.0, -55.0),
-         'vp': (0, 0, 0)}
-
-        >>> nodes.get('V_m')
-        (-70.0, -70.0, -70.0)
-
-        >>> nodes[0].get('V_m')
-        -70.0
-
-        >>> nodes.get('V_m', 'C_m')
-        {'V_m': (-70.0, -70.0, -70.0), 'C_m': (250.0, 250.0, 250.0)}
-
-        >>> voltmeter.get('events', 'senders')
-        array([...], dtype=int64)
-        """
-
-        if not self:
-            raise ValueError('Cannot get parameter of empty NodeCollection')
-
-        # ------------------------- #
-        #      Checks of input      #
-        # ------------------------- #
-        if not kwargs:
-            output = ''
-        elif 'output' in kwargs:
-            output = kwargs['output']
-            if output == 'pandas' and not HAVE_PANDAS:
-                raise ImportError('Pandas could not be imported')
-        else:
-            raise TypeError('Got unexpected keyword argument')
-
-        pandas_output = output == 'pandas'
-
-        if len(params) == 0:
-            # get() is called without arguments
-            result = nestkernel.llapi_get_nc_status(self._datum)
-        elif len(params) == 1:
-            # params is a tuple with a string or list of strings
-            result = get_parameters(self, params[0])
-            if params[0] == 'compartments':
-                result = Compartments(self, result)
-            elif params[0] == 'receptors':
-                result = Receptors(self, result)
-        else:
-            # Hierarchical addressing
-            # TODO-PYNEST-NG: Drop this?
Not sure anyone ever used it... - result = get_parameters_hierarchical_addressing(self, params) - - if isinstance(result, dict) and len(self) == 1: - new_result = {} - for k, v in result.items(): - new_result[k] = v[0] if is_iterable(v) and len(v) == 1 else v - result = new_result - - if pandas_output: - index = self.get('global_id') - if len(params) == 1 and isinstance(params[0], str): - # params is a string - result = {params[0]: result} - elif len(params) > 1 and isinstance(params[1], str): - # hierarchical, single string - result = {params[1]: result} - if len(self) == 1: - index = [index] - result = {key: [val] for key, val in result.items()} - result = pandas.DataFrame(result, index=index) - elif output == 'json': - result = to_json(result) - - return result - - def set(self, params=None, **kwargs): - """ - Set the parameters of nodes to params. - - If `kwargs` is given, it has to be names and values of an attribute as keyword argument pairs. The values - can be single values or list of the same size as the `NodeCollection`. - - Parameters - ---------- - params : str or dict or list - Dictionary of parameters (either lists or single values) or list of dictionaries of parameters - of same length as the `NodeCollection`. - kwargs : keyword argument pairs - Named arguments of parameters of the elements in the `NodeCollection`. - - Raises - ------ - TypeError - If the input params are of the wrong form. - KeyError - If the specified parameter does not exist for the nodes. - - See Also - -------- - :py:func:`get`, - """ - - if not self: - return - if kwargs and params is None: - params = kwargs - elif kwargs and params: - raise TypeError("must either provide params or kwargs, but not both.") - - local_nodes = [self.local] if len(self) == 1 else self.local - - if isinstance(params, dict) and 'compartments' in params: - if isinstance(params['compartments'], Compartments): - params['compartments'] = params['compartments'].get_tuple() - elif params['compartments'] is None: - # Adding compartments has been handled by the += operator, so we can remove the entry. - params.pop('compartments') - - if isinstance(params, dict) and 'receptors' in params: - if isinstance(params['receptors'], Receptors): - params['receptors'] = params['receptors'].get_tuple() - elif params['receptors'] is None: - # Adding receptors has been handled by the += operator, so we can remove the entry. - params.pop('receptors') - - if isinstance(params, dict) and all(local_nodes): - - node_params = self[0].get() - iterable_node_param = lambda key: key in node_params and not is_iterable(node_params[key]) - contains_list = [is_iterable(vals) and iterable_node_param(key) for key, vals in params.items()] - - if any(contains_list): - temp_param = [{} for _ in range(self.__len__())] - - for key, vals in params.items(): - if not is_iterable(vals): - for temp_dict in temp_param: - temp_dict[key] = vals - else: - for i, temp_dict in enumerate(temp_param): - temp_dict[key] = vals[i] - params = temp_param - - if isinstance(params, dict): - params = [params] - - nestkernel.llapi_set_nc_status(self._datum, params) - - def tolist(self): - """ - Convert `NodeCollection` to list. - """ - if self.__len__() == 0: - return [] - - return (list(self.get('global_id')) if len(self) > 1 - else [self.get('global_id')]) - - def index(self, node_id): - """ - Find the index of a node ID in the `NodeCollection`. - - Parameters - ---------- - node_id : int - Global ID to be found. 
-
-        Raises
-        ------
-        ValueError
-            If the node ID is not in the `NodeCollection`.
-        """
-        index = nestkernel.llapi_nc_find(self._datum, node_id)
-
-        if index == -1:
-            raise ValueError('{} is not in NodeCollection'.format(node_id))
-
-        return index
-
-    def __bool__(self):
-        """Converts the NodeCollection to a bool. False if it is empty, True otherwise."""
-        return len(self) > 0
-
-    def __array__(self, dtype=None):
-        """Convert the NodeCollection to a NumPy array."""
-        return numpy.array(self.tolist(), dtype=dtype)
-
-    def __getattr__(self, attr):
-        if not self:
-            raise AttributeError('Cannot get attribute of empty NodeCollection')
-
-        if attr == 'spatial':
-            metadata = nestkernel.llapi_get_nc_metadata(self._datum)
-            val = metadata if metadata else None
-            super().__setattr__(attr, val)
-            return self.spatial
-
-        # NumPy compatibility check:
-        # raises AttributeError to tell NumPy that interfaces other than
-        # __array__ are not available (otherwise get_parameters would be
-        # queried, KeyError would be raised, and all would crash)
-        if attr.startswith('__array_'):
-            raise AttributeError
-
-        return self.get(attr)
-
-    def __setattr__(self, attr, value):
-        # `_datum` is the only property of NodeCollection that should not be
-        # interpreted as a property of the model
-        if attr == '_datum':
-            super().__setattr__(attr, value)
-        else:
-            self.set({attr: value})
-
-
-class SynapseCollectionIterator:
-    """
-    Iterator class for SynapseCollection.
-    """
-
-    def __init__(self, synapse_collection):
-        self._iter = iter(synapse_collection._datum)
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        return SynapseCollection(next(self._iter))
-
-
-class SynapseCollection:
-    """
-    Class for Connections.
-
-    `SynapseCollection` represents the connections of a network. The class supports indexing, iteration, length and
-    equality. You can get and set connection parameters by using the member functions :py:func:`get()` and
-    :py:func:`set()`. By using the member function :py:func:`sources()` you get an iterator over
-    source nodes, while :py:func:`targets()` returns an iterator over the target nodes of the connections.
-
-    A SynapseCollection is created by the :py:func:`.GetConnections` function.
-    """
-
-    _datum = None
-
-    def __init__(self, data):
-
-        if isinstance(data, list):
-            for datum in data:
-                if (not isinstance(datum, nestkernel.ConnectionObject)):
-                    raise TypeError("Expected ConnectionObject.")
-            self._datum = data
-        elif data is None:
-            # We can have an empty SynapseCollection if there are no connections.
-            self._datum = data
-        else:
-            if (not isinstance(data, nestkernel.ConnectionObject)):
-                raise TypeError("Expected ConnectionObject.")
-            # self._datum needs to be a list of ConnectionObjects.
-            self._datum = [data]
-
-        self.print_full = False
-
-    def __iter__(self):
-        return SynapseCollectionIterator(self)
-
-    def __len__(self):
-        if self._datum is None:
-            return 0
-        return len(self._datum)
-
-    def __eq__(self, other):
-        if not isinstance(other, SynapseCollection):
-            raise NotImplementedError()
-
-        if self.__len__() != other.__len__():
-            return False
-        self_get = self.get(['source', 'target', 'target_thread',
-                             'synapse_id', 'port'])
-        other_get = other.get(['source', 'target', 'target_thread',
-                               'synapse_id', 'port'])
-        if self_get != other_get:
-            return False
-        return True
-
-    def __ne__(self, other):
-        if not isinstance(other, SynapseCollection):
-            raise NotImplementedError()
-        return not self == other
-
-    def __getitem__(self, key):
-        if isinstance(key, slice):
-            return SynapseCollection(self._datum[key])
-        else:
-            return SynapseCollection([self._datum[key]])
-
-    def __str__(self):
-        """
-        Printing a `SynapseCollection` returns something of the form:
-
-            source   target   synapse model   weight   delay
-            -------- -------- --------------- -------- -------
-                  1        4   static_synapse    1.000   1.000
-                  2        4   static_synapse    2.000   1.000
-                  1        3     stdp_synapse    4.000   1.000
-                  1        4     stdp_synapse    3.000   1.000
-                  2        3     stdp_synapse    3.000   1.000
-                  2        4     stdp_synapse    2.000   1.000
-
-        If your SynapseCollection has 35 or more elements, only the first and last 15 connections are printed. To
-        display all, first set `print_full = True`.
-
-        ::
-
-            conns = nest.GetConnections()
-            conns.print_full = True
-            print(conns)
-        """
-
-        def format_row_(s, t, sm, w, dly):
-            try:
-                return f'{s:>{src_len-1}d} {t:>{trg_len}d} {sm:>{sm_len}s} {w:>#{w_len}.{4}g} {dly:>#{d_len}.{4}g}'
-            except ValueError:
-                # Used when we have many connections and print_full=False
-                return f'{s:>{src_len-1}} {t:>{trg_len}} {sm:>{sm_len}} {w:>{w_len}} {dly:>{d_len}}'
-
-        MAX_SIZE_FULL_PRINT = 35  # 35 is arbitrarily chosen.
-
-        params = self.get()
-
-        if len(params) == 0:
-            return 'The synapse collection does not contain any connections.'
-
-        srcs = params['source']
-        trgt = params['target']
-        wght = params['weight']
-        dlay = params['delay']
-        s_model = params['synapse_model']
-
-        if isinstance(srcs, int):
-            srcs = [srcs]
-            trgt = [trgt]
-            wght = [wght]
-            dlay = [dlay]
-            s_model = [s_model]
-
-        src_h = 'source'
-        trg_h = 'target'
-        sm_h = 'synapse model'
-        w_h = 'weight'
-        d_h = 'delay'
-
-        # Find maximum number of characters for each column, used to determine width of column
-        src_len = max(len(src_h) + 2, floor(log(max(srcs), 10)))
-        trg_len = max(len(trg_h) + 2, floor(log(max(trgt), 10)))
-        sm_len = max(len(sm_h) + 2, len(max(s_model, key=len)))
-        w_len = len(w_h) + 2
-        d_len = len(d_h) + 2
-
-        if len(srcs) >= MAX_SIZE_FULL_PRINT and not self.print_full:
-            # u'\u22EE ' is the unicode for vertical ellipsis, used when we have many connections
-            srcs = srcs[:15] + [u'\u22EE '] + srcs[-15:]
-            trgt = trgt[:15] + [u'\u22EE '] + trgt[-15:]
-            wght = wght[:15] + [u'\u22EE '] + wght[-15:]
-            dlay = dlay[:15] + [u'\u22EE '] + dlay[-15:]
-            s_model = s_model[:15] + [u'\u22EE '] + s_model[-15:]
-
-        headers = f'{src_h:^{src_len}} {trg_h:^{trg_len}} {sm_h:^{sm_len}} {w_h:^{w_len}} {d_h:^{d_len}}' + '\n'
-        borders = '-'*src_len + ' ' + '-'*trg_len + ' ' + '-'*sm_len + ' ' + '-'*w_len + ' ' + '-'*d_len + '\n'
-        output = '\n'.join(format_row_(s, t, sm, w, d) for s, t, sm, w, d in zip(srcs, trgt, s_model, wght, dlay))
-        result = headers + borders + output
-
-        return result
-
-    def __getattr__(self, attr):
-        if attr == 'distance':
-            dist = nestkernel.llapi_distance(self._datum)
-            super().__setattr__(attr, dist)
-            return self.distance
-
-        return self.get(attr)
-
-    def __setattr__(self, attr, value):
-        # `_datum` is the only property of SynapseCollection that should not be
-        # interpreted as a property of the model
-        if attr == '_datum' or attr == 'print_full':
-            super().__setattr__(attr, value)
-        else:
-            self.set({attr: value})
-
-    def sources(self):
-        """Returns iterator containing the source node IDs of the `SynapseCollection`."""
-        sources = self.get('source')
-        if not isinstance(sources, (list, tuple)):
-            sources = (sources,)
-        return iter(sources)
-
-    def targets(self):
-        """Returns iterator containing the target node IDs of the `SynapseCollection`."""
-        targets = self.get('target')
-        if not isinstance(targets, (list, tuple)):
-            targets = (targets,)
-        return iter(targets)
-
-    def get(self, keys=None, output=''):
-        """
-        Return a parameter dictionary of the connections.
-
-        If `keys` is a string, a list of values is returned, unless we have a
-        single connection, in which case the single value is returned.
-        `keys` may also be a list, in which case a dictionary with a list of
-        values is returned.
-
-        Parameters
-        ----------
-        keys : str or list, optional
-            String or a list of strings naming model properties. get
-            then returns a single value or a dictionary with lists of values
-            belonging to the given `keys`.
-        output : str, ['pandas','json'], optional
-            If the returned data should be in a Pandas DataFrame or in a
-            JSON serializable format.
-
-        Returns
-        -------
-        dict:
-            All parameters, or, if keys is a list of strings, a dictionary with
-            lists of corresponding parameters
-        type:
-            If keys is a string, the corresponding parameter(s) is returned
-
-        Raises
-        ------
-        TypeError
-            If input params are of the wrong form.
-        KeyError
-            If the specified parameter does not exist for the connections.
-
-        See Also
-        --------
-        set
-
-        Examples
-        --------
-
-        >>> conns.get()
-        {'delay': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
-         ...
-         'weight': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]}
-
-        >>> conns.get('weight')
-        [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
-
-        >>> conns[0].get('weight')
-        1.0
-
-        >>> conns.get(['source', 'weight'])
-        {'source': [1, 1, 1, 2, 2, 2, 3, 3, 3],
-         'weight': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]}
-        """
-        pandas_output = output == 'pandas'
-        if pandas_output and not HAVE_PANDAS:
-            raise ImportError('Pandas could not be imported')
-
-        # Return empty dictionary if we have no connections or if we have done a nest.ResetKernel()
-        num_conns = GetKernelStatus('num_connections')  # Has to be called first because it involves MPI communication.
- if self.__len__() == 0 or num_conns == 0: - # Return empty tuple if get is called with an argument - return {} if keys is None else () - - if keys is None: - result = nestkernel.llapi_get_connection_status(self._datum) - elif isinstance(keys, str): - # Extracting the correct values will be done in restructure_data below - result = nestkernel.llapi_get_connection_status(self._datum) - elif is_iterable(keys): - result = [[d[key] for key in keys] for d in nestkernel.llapi_get_connection_status(self._datum)] - else: - raise TypeError("keys should be either a string or an iterable") - - # Need to restructure the data. - final_result = restructure_data(result, keys) - - if pandas_output: - index = (self.get('source') if self.__len__() > 1 else - (self.get('source'),)) - if isinstance(keys, str): - final_result = {keys: final_result} - final_result = pandas.DataFrame(final_result, index=index) - elif output == 'json': - final_result = to_json(final_result) - - return final_result - - def set(self, params=None, **kwargs): - """ - Set the parameters of the connections to `params`. - - If `kwargs` is given, it has to be names and values of an attribute as keyword argument pairs. The values - can be single values or list of the same size as the `SynapseCollection`. - - Parameters - ---------- - params : str or dict or list - Dictionary of parameters (either lists or single values) or list of dictionaries of parameters - of same length as `SynapseCollection`. - kwargs : keyword argument pairs - Named arguments of parameters of the elements in the `SynapseCollection`. - - Raises - ------ - TypeError - If input params are of the wrong form. - KeyError - If the specified parameter does not exist for the connections. - - See Also - -------- - get - """ - - # This was added to ensure that the function is a nop (instead of, - # for instance, raising an exception) when applied to an empty - # SynapseCollection, or after having done a nest.ResetKernel(). - if self.__len__() == 0 or GetKernelStatus('network_size') == 0: - return - - if (isinstance(params, (list, tuple)) and - self.__len__() != len(params)): - raise TypeError("status dict must be a dict, or a list of dicts of length {}".format(self.__len__())) - - if kwargs and params is None: - params = kwargs - elif kwargs and params: - raise TypeError("must either provide params or kwargs, but not both.") - - if isinstance(params, dict): - node_params = self[0].get() - contains_list = [is_iterable(vals) and key in node_params and not is_iterable(node_params[key]) for - key, vals in params.items()] - - if any(contains_list): - temp_param = [{} for _ in range(self.__len__())] - - for key, vals in params.items(): - if not is_iterable(vals): - for temp_dict in temp_param: - temp_dict[key] = vals - else: - for i, temp_dict in enumerate(temp_param): - temp_dict[key] = vals[i] - params = temp_param - - nestkernel.llapi_set_connection_status(self._datum, params) - - def disconnect(self): - """ - Disconnect the connections in the `SynapseCollection`. - """ - sps(self._datum) - sr('Disconnect_a') - - -class CollocatedSynapses: - """ - Class for collocated synapse specifications. - - Wrapper around a list of specifications, used when calling :py:func:`.Connect`. 
- - Example - ------- - - :: - - nodes = nest.Create('iaf_psc_alpha', 3) - syn_spec = nest.CollocatedSynapses({'weight': 4., 'delay': 1.5}, - {'synapse_model': 'stdp_synapse'}, - {'synapse_model': 'stdp_synapse', 'alpha': 3.}) - nest.Connect(nodes, nodes, conn_spec='one_to_one', syn_spec=syn_spec) - - conns = nest.GetConnections() - - print(conns.alpha) - print(len(syn_spec)) - """ - - def __init__(self, *args): - self.syn_specs = args - - def __len__(self): - return len(self.syn_specs) - - -class Mask: - """ - Class for spatial masks. - - Masks are used when creating connections when nodes have spatial extent. A mask - describes the area of the pool population that shall be searched to find nodes to - connect to for any given node in the driver population. Masks are created using - the :py:func:`.CreateMask` command. - """ - - _datum = None - - # The constructor should not be called by the user - def __init__(self, datum): - """Masks must be created using the CreateMask command.""" - if not isinstance(datum, kernel.SLIDatum) or datum.dtype != "masktype": - raise TypeError("expected mask Datum") - self._datum = datum - - # Generic binary operation - def _binop(self, op, other): - if not isinstance(other, Mask): - raise NotImplementedError() - return sli_func(op, self._datum, other._datum) - - def __or__(self, other): - return self._binop("or", other) - - def __and__(self, other): - return self._binop("and", other) - - def __sub__(self, other): - return self._binop("sub", other) - - def Inside(self, point): - """ - Test if a point is inside a mask. - - Parameters - ---------- - point : tuple/list of float values - Coordinate of point - - Returns - ------- - out : bool - True if the point is inside the mask, False otherwise - """ - return sli_func("Inside", point, self._datum) - - -# TODO-PYNEST-NG: We may consider moving the entire (or most of) Parameter class to the cython level. -class Parameter: - """ - Class for parameters - - A parameter may be used as a probability kernel when creating - connections and nodes or as synaptic parameters (such as weight and delay). - Parameters are created using the :py:func:`.CreateParameter` command. - """ - - _datum = None - - # The constructor should not be called by the user - def __init__(self, datum): - """Parameters must be created using the CreateParameter command.""" - if not isinstance(datum, nestkernel.ParameterObject): - raise TypeError("expected low-level parameter object;" - " use the CreateParameter() function to create a Parameter") - self._datum = datum - - def _arg_as_parameter(self, arg): - if isinstance(arg, Parameter): - return arg - if isinstance(arg, (int, float)): - # Value for the constant parameter must be float. 
- return CreateParameter('constant', {'value': float(arg)}) - raise NotImplementedError() - - def __add__(self, other): - return nestkernel.llapi_add_parameter(self._datum, self._arg_as_parameter(other)._datum) - - def __radd__(self, other): - return self + other - - def __sub__(self, other): - return nestkernel.llapi_subtract_parameter(self._datum, self._arg_as_parameter(other)._datum) - - def __rsub__(self, other): - return self * (-1) + other - - def __neg__(self): - return self * (-1) - - def __mul__(self, other): - return nestkernel.llapi_multiply_parameter(self._datum, self._arg_as_parameter(other)._datum) - - def __rmul__(self, other): - return self * other - - def __div__(self, other): - return nestkernel.llapi_divide_parameter(self._datum, self._arg_as_parameter(other)._datum) - - def __truediv__(self, other): - return nestkernel.llapi_divide_parameter(self._datum, self._arg_as_parameter(other)._datum) - - def __pow__(self, exponent): - return nestkernel.llapi_pow_parameter(self._datum, self._arg_as_parameter(float(exponent))._datum) - - def __lt__(self, other): - return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {'comparator': 0}) - - def __le__(self, other): - return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {'comparator': 1}) - - def __eq__(self, other): - return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {'comparator': 2}) - - def __ne__(self, other): - return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {'comparator': 3}) - - def __ge__(self, other): - return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {'comparator': 4}) - - def __gt__(self, other): - return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {'comparator': 5}) - - def GetValue(self): - """ - Compute value of parameter. 
- - Returns - ------- - out : value - The value of the parameter - - See also - -------- - CreateParameter - - Example - ------- - :: - - import nest - - # normal distribution parameter - P = nest.CreateParameter('normal', {'mean': 0.0, 'std': 1.0}) - - # get out value - P.GetValue() - """ - return nestkernel.llapi_get_param_value(self._datum) - - def is_spatial(self): - return nestkernel.llapi_param_is_spatial(self._datum) - - def apply(self, spatial_nc, positions=None): - if positions is None: - return nestkernel.llapi_apply_parameter(self._datum, spatial_nc) - else: - if len(spatial_nc) != 1: - raise ValueError('The NodeCollection must contain a single node ID only') - if not isinstance(positions, (list, tuple)): - raise TypeError('Positions must be a list or tuple of positions') - for pos in positions: - if not isinstance(pos, (list, tuple, numpy.ndarray)): - raise TypeError('Each position must be a list or tuple') - if len(pos) != len(positions[0]): - raise ValueError('All positions must have the same number of dimensions') - return nestkernel.llapi_apply_parameter(self._datum, {'source': spatial_nc, 'targets': positions}) - - -class CmBase: - - def __init__(self, node_collection, elements): - if not isinstance(node_collection, NodeCollection): - raise TypeError(f'node_collection must be a NodeCollection, got {type(node_collection)}') - if not isinstance(elements, tuple): - raise TypeError(f'elements must be a tuple of dicts, got {type(elements)}') - self._elements = elements - self._node_collection = node_collection - - def __add__(self, other): - new_elements = list(self._elements) - if isinstance(other, dict): - new_elements += [other] - elif isinstance(other, (tuple, list)): - if not all(isinstance(d, dict) for d in other): - raise TypeError( - f'{self.__class__.__name__} can only be added with dicts, lists of dicts, ' - f'or other {self.__class__.__name__}') - new_elements += list(other) - elif isinstance(other, self.__class__): - new_elements += list(other._elements) - else: - raise NotImplementedError(f'{self.__class__.__name__} can only be added with dicts, lists of dicts,' - f' or other {self.__class__.__name__}, got {type(other)}') - - return self.__class__(self._node_collection, tuple(new_elements)) - - def __iadd__(self, other): - if isinstance(other, dict): - new_elements = [other] - elif isinstance(other, (tuple, list)): - if not all(isinstance(d, dict) for d in other): - raise TypeError(f'{self.__class__.__name__} can only be added with dicts, lists of dicts, ' - f'or other {self.__class__.__name__}') - new_elements = list(other) - elif isinstance(other, self.__class__): - new_elements = list(other._elements) - else: - raise NotImplementedError(f'{self.__class__.__name__} can only be added with dicts, lists of dicts,' - f' or other {self.__class__.__name__}, got {type(other)}') - self._node_collection.set({f'add_{self.__class__.__name__.lower()}': new_elements}) - return None # Flagging elements as added by returning None - - def __getitem__(self, key): - return self._elements[key] - - def __str__(self): - return str(self._elements) - - def get_tuple(self): - return self._elements - - -class Compartments(CmBase): - # No specialization here because all is done in the base class based on the class name. - pass - - -class Receptors(CmBase): - # No specialization here because all is done in the base class based on the class name. - pass - - -def serializable(data): - """Make data serializable for JSON. 
- - Parameters - ---------- - data : any - - Returns - ------- - data_serialized : str, int, float, list, dict - Data can be encoded to JSON - """ - - if isinstance(data, (numpy.ndarray, NodeCollection)): - return data.tolist() - if isinstance(data, SynapseCollection): - # Get full information from SynapseCollection - return serializable(data.get()) - if isinstance(data, (list, tuple)): - return [serializable(d) for d in data] - if isinstance(data, dict): - return dict([(key, serializable(value)) for key, value in data.items()]) - return data - - -def to_json(data, **kwargs): - """Serialize data to JSON. - - Parameters - ---------- - data : any - kwargs : keyword argument pairs - Named arguments of parameters for `json.dumps` function. - - Returns - ------- - data_json : str - JSON format of the data - """ - - data_serialized = serializable(data) - data_json = json.dumps(data_serialized, **kwargs) - return data_json diff --git a/pynest/nest/lib/hl_api_connection_helpers.py b/pynest/nest/lib/hl_api_connection_helpers.py index 6c119d7c72..c879f76d13 100644 --- a/pynest/nest/lib/hl_api_connection_helpers.py +++ b/pynest/nest/lib/hl_api_connection_helpers.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# hl_api_connection_helpers.py +# _hl_api_connection_helpers.py # # This file is part of NEST. # @@ -25,13 +25,13 @@ """ import copy - import numpy as np +from .._ll_api import * +from .. import pynestkernel as kernel from .. import nestkernel_api as nestkernel -from ..ll_api import * -from .hl_api_exceptions import NESTError, NESTErrors -from .hl_api_types import CollocatedSynapses, Mask, NodeCollection, Parameter +from ._hl_api_types import CollocatedSynapses, Mask, NodeCollection, Parameter +from ._hl_api_exceptions import NESTErrors __all__ = [ "_connect_layers_needed", @@ -74,8 +74,10 @@ def _process_syn_spec(syn_spec, conn_spec, prelength, postlength, use_connect_ar rule = conn_spec["rule"] if isinstance(syn_spec, dict): - if "synapse_model" in syn_spec and not isinstance(syn_spec["synapse_model"], str): - raise NESTErrors.NESTError("'synapse_model' must be a string") + if "synapse_model" in syn_spec and not isinstance( + syn_spec["synapse_model"], str + ): + raise kernel.NESTError("'synapse_model' must be a string") for key, value in syn_spec.items(): # if value is a list, it is converted to a numpy array if isinstance(value, (list, tuple)): @@ -86,11 +88,13 @@ def _process_syn_spec(syn_spec, conn_spec, prelength, postlength, use_connect_ar if rule == "one_to_one": if value.shape[0] != prelength: if use_connect_arrays: - raise nestkernel.NESTError( - "'{}' has to be an array of dimension {}.".format(key, prelength) + raise kernel.NESTError( + "'{}' has to be an array of dimension {}.".format( + key, prelength + ) ) else: - raise NESTErrors.NESTError( + raise kernel.NESTError( "'{}' has to be an array of dimension {}, a scalar or a dictionary.".format( key, prelength ) @@ -99,7 +103,7 @@ def _process_syn_spec(syn_spec, conn_spec, prelength, postlength, use_connect_ar syn_spec[key] = value elif rule == "fixed_total_number": if "N" in conn_spec and value.shape[0] != conn_spec["N"]: - raise nestkernel.NESTError( + raise kernel.NESTError( "'{}' has to be an array of dimension {}, a scalar or a dictionary".format( key, conn_spec["N"] ) @@ -107,15 +111,17 @@ def _process_syn_spec(syn_spec, conn_spec, prelength, postlength, use_connect_ar else: syn_spec[key] = value else: - raise NESTErrors.NESTError( + raise kernel.NESTError( "'{}' has the wrong type. 
One-dimensional parameter arrays can only be used in " - "conjunction with rule 'one_to_one' or 'fixed_total_number'.".format(key) + "conjunction with rule 'one_to_one' or 'fixed_total_number'.".format( + key + ) ) elif len(value.shape) == 2: if rule == "all_to_all": if value.shape[0] != postlength or value.shape[1] != prelength: - raise NESTErrors.NESTError( + raise kernel.NESTError( "'{}' has to be an array of dimension {}x{} (n_target x n_sources), a scalar " "or a dictionary.".format(key, postlength, prelength) ) @@ -124,7 +130,7 @@ def _process_syn_spec(syn_spec, conn_spec, prelength, postlength, use_connect_ar elif rule == "fixed_indegree": indegree = conn_spec["indegree"] if value.shape[0] != postlength or value.shape[1] != indegree: - raise nestkernel.NESTError( + raise kernel.NESTError( "'{}' has to be an array of dimension {}x{} (n_target x indegree), a scalar " "or a dictionary.".format(key, postlength, indegree) ) @@ -133,16 +139,18 @@ def _process_syn_spec(syn_spec, conn_spec, prelength, postlength, use_connect_ar elif rule == "fixed_outdegree": outdegree = conn_spec["outdegree"] if value.shape[0] != prelength or value.shape[1] != outdegree: - raise nestkernel.NESTError( + raise kernel.NESTError( "'{}' has to be an array of dimension {}x{} (n_sources x outdegree), a scalar " "or a dictionary.".format(key, prelength, outdegree) ) else: syn_spec[key] = value.flatten() else: - raise NESTErrors.NESTError( + raise kernel.NESTError( "'{}' has the wrong type. Two-dimensional parameter arrays can only be used in " - "conjunction with rules 'all_to_all', 'fixed_indegree' or fixed_outdegree'.".format(key) + "conjunction with rules 'all_to_all', 'fixed_indegree' or fixed_outdegree'.".format( + key + ) ) # check that "synapse_model" is there for use_connect_arrays @@ -171,10 +179,20 @@ def _process_spatial_projections(conn_spec, syn_spec): "use_on_source", "allow_oversized_mask", ] - allowed_syn_spec_keys = ["weight", "delay", "synapse_model", "synapse_label", "receptor_type"] + allowed_syn_spec_keys = [ + "weight", + "delay", + "synapse_model", + "synapse_label", + "receptor_type", + ] for key in conn_spec.keys(): if key not in allowed_conn_spec_keys: - raise ValueError("'{}' is not allowed in conn_spec when connecting with mask or kernel".format(key)) + raise ValueError( + "'{}' is not allowed in conn_spec when connecting with mask or kernel".format( + key + ) + ) projections = {} projections.update(conn_spec) @@ -186,22 +204,32 @@ def _process_spatial_projections(conn_spec, syn_spec): for key in syn_list.keys(): if key not in allowed_syn_spec_keys: raise ValueError( - "'{}' is not allowed in syn_spec when connecting with mask or kernel".format(key) + "'{}' is not allowed in syn_spec when connecting with mask or kernel".format( + key + ) ) projections.update({"synapse_parameters": syn_spec.syn_specs}) else: for key in syn_spec.keys(): if key not in allowed_syn_spec_keys: - raise ValueError("'{}' is not allowed in syn_spec when connecting with mask or kernel".format(key)) + raise ValueError( + "'{}' is not allowed in syn_spec when connecting with mask or kernel".format( + key + ) + ) projections.update(syn_spec) if conn_spec["rule"] == "fixed_indegree": if "use_on_source" in conn_spec: - raise ValueError("'use_on_source' can only be set when using pairwise_bernoulli") + raise ValueError( + "'use_on_source' can only be set when using pairwise_bernoulli" + ) projections["connection_type"] = "pairwise_bernoulli_on_source" projections["number_of_connections"] = projections.pop("indegree") 
elif conn_spec["rule"] == "fixed_outdegree": if "use_on_source" in conn_spec: - raise ValueError("'use_on_source' can only be set when using pairwise_bernoulli") + raise ValueError( + "'use_on_source' can only be set when using pairwise_bernoulli" + ) projections["connection_type"] = "pairwise_bernoulli_on_target" projections["number_of_connections"] = projections.pop("outdegree") elif conn_spec["rule"] == "pairwise_bernoulli": @@ -213,7 +241,7 @@ def _process_spatial_projections(conn_spec, syn_spec): if "use_on_source" in projections: projections.pop("use_on_source") else: - raise nestkernel.NESTError( + raise kernel.NESTError( "When using kernel or mask, the only possible connection rules are " "'pairwise_bernoulli', 'fixed_indegree', or 'fixed_outdegree'" ) @@ -230,7 +258,11 @@ def _connect_layers_needed(conn_spec, syn_spec): return True # We must use ConnectLayers in some additional cases. rule_is_bernoulli = "pairwise_bernoulli" in str(conn_spec["rule"]) - if "mask" in conn_spec or ("p" in conn_spec and not rule_is_bernoulli) or "use_on_source" in conn_spec: + if ( + "mask" in conn_spec + or ("p" in conn_spec and not rule_is_bernoulli) + or "use_on_source" in conn_spec + ): return True # If a syn_spec entry is based on spatial properties, we must use ConnectLayers. if isinstance(syn_spec, dict): @@ -238,13 +270,18 @@ def _connect_layers_needed(conn_spec, syn_spec): if isinstance(item, Parameter) and item.is_spatial(): return True elif isinstance(syn_spec, CollocatedSynapses): - return any([_connect_layers_needed(conn_spec, syn_param) for syn_param in syn_spec.syn_specs]) + return any( + [ + _connect_layers_needed(conn_spec, syn_param) + for syn_param in syn_spec.syn_specs + ] + ) # If we get here, there is not need to use ConnectLayers. return False def _connect_spatial(pre, post, projections): - """Connect ``pre`` to ``post`` using the specifications in ``projections``.""" + """Connect `pre` to `post` using the specifications in `projections`.""" def fixdict(d): for k, v in d.items(): @@ -276,7 +313,9 @@ def _process_input_nodes(pre, post, conn_spec): # check for 'one_to_one' conn_spec one_to_one_cspec = ( - conn_spec if not isinstance(conn_spec, dict) else conn_spec.get("rule", "all_to_all") == "one_to_one" + conn_spec + if not isinstance(conn_spec, dict) + else conn_spec.get("rule", "all_to_all") == "one_to_one" ) # check and convert input types @@ -299,7 +338,8 @@ def _process_input_nodes(pre, post, conn_spec): if not pre_is_nc or not post_is_nc: if len(pre) != len(post): raise NESTErrors.ArgumentType( - "Connect", "If `pre` or `post` contain non-unique IDs, then they must have the same length." + "Connect", + "If `pre` or `post` contain non-unique IDs, then they must have the same length.", ) # convert to arrays @@ -320,6 +360,8 @@ def _process_input_nodes(pre, post, conn_spec): use_connect_arrays = True if use_connect_arrays and not one_to_one_cspec: - raise ValueError("When connecting two arrays with non-unique IDs, `conn_spec` must be 'one_to_one'.") + raise ValueError( + "When connecting two arrays with non-unique IDs, `conn_spec` must be 'one_to_one'." + ) return use_connect_arrays, pre, post diff --git a/pynest/nest/lib/hl_api_connections.py b/pynest/nest/lib/hl_api_connections.py index ff89497182..17f7ba3909 100644 --- a/pynest/nest/lib/hl_api_connections.py +++ b/pynest/nest/lib/hl_api_connections.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# hl_api_connections.py +# _hl_api_connections.py # # This file is part of NEST. 
#
@@ -25,18 +25,21 @@
 
 import numpy
 
+from .._ll_api import connect_arrays
+from .. import pynestkernel as kernel
 from .. import nestkernel_api as nestkernel
-from .hl_api_connection_helpers import (
+
+from ._hl_api_connection_helpers import (
+    _process_input_nodes,
     _connect_layers_needed,
     _connect_spatial,
     _process_conn_spec,
-    _process_input_nodes,
     _process_spatial_projections,
     _process_syn_spec,
 )
-from .hl_api_nodes import Create
-from .hl_api_parallel_computing import NumProcesses
-from .hl_api_types import Mask, NodeCollection, Parameter, SynapseCollection
+from ._hl_api_nodes import Create
+from ._hl_api_parallel_computing import NumProcesses
+from ._hl_api_types import NodeCollection, SynapseCollection, Mask, Parameter
 
 __all__ = [
     "Connect",
@@ -131,7 +134,7 @@ def Connect(pre, post, conn_spec=None, syn_spec=None, return_synapsecollection=F
 
     Raises
     ------
-    nestkernel.NESTError
+    kernel.NESTError
 
     Notes
     -----
@@ -186,7 +189,7 @@ def Connect(pre, post, conn_spec=None, syn_spec=None, return_synapsecollection=F
     will be used. Distributed parameters can be defined through NEST's different parameter types. NEST has various
     random parameters, spatial parameters and distributions (only accessible for nodes with spatial positions),
     logical expressions and mathematical expressions, which can be used to define node and connection parameters.
 
     To see all available parameters, see documentation defined in distributions, logic, math,
@@ -196,20 +199,23 @@ def Connect(pre, post, conn_spec=None, syn_spec=None, return_synapsecollection=F
     ---------
     :ref:`connection_management`
     """
-
     use_connect_arrays, pre, post = _process_input_nodes(pre, post, conn_spec)
 
     # Converting conn_spec to dict, without putting it on the SLI stack.
     processed_conn_spec = _process_conn_spec(conn_spec)
 
     # If syn_spec is given, its contents are checked, and if needed converted
     # to the right formats.
-    processed_syn_spec = _process_syn_spec(syn_spec, processed_conn_spec, len(pre), len(post), use_connect_arrays)
+    processed_syn_spec = _process_syn_spec(
+        syn_spec, processed_conn_spec, len(pre), len(post), use_connect_arrays
+    )
 
     # If pre and post are arrays of node IDs, and conn_spec is unspecified,
     # the node IDs are connected one-to-one.
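     # A worked illustration (hypothetical values, not part of the patch): with
     # non-unique arrays such as pre = [1, 2, 2] and post = [3, 4, 5],
     # one-to-one pairing connects (1, 3), (2, 4) and (2, 5); this is why any
     # conn_spec other than 'one_to_one' is rejected for such input below.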
     if use_connect_arrays:
         if return_synapsecollection:
-            raise ValueError("SynapseCollection cannot be returned when connecting two arrays of node IDs")
+            raise ValueError(
+                "SynapseCollection cannot be returned when connecting two arrays of node IDs"
+            )
 
         if processed_syn_spec is None:
             raise ValueError(
@@ -223,8 +229,16 @@
         if "delays" in processed_syn_spec:
             raise ValueError("To specify delays, use 'delay' in syn_spec.")
 
-        weights = numpy.array(processed_syn_spec["weight"]) if "weight" in processed_syn_spec else None
-        delays = numpy.array(processed_syn_spec["delay"]) if "delay" in processed_syn_spec else None
+        weights = (
+            numpy.array(processed_syn_spec["weight"])
+            if "weight" in processed_syn_spec
+            else None
+        )
+        delays = (
+            numpy.array(processed_syn_spec["delay"])
+            if "delay" in processed_syn_spec
+            else None
+        )
 
         try:
             synapse_model = processed_syn_spec["synapse_model"]
@@ -237,11 +251,15 @@
         # Split remaining syn_spec entries to key and value arrays
         reduced_processed_syn_spec = {
             k: processed_syn_spec[k]
-            for k in set(processed_syn_spec.keys()).difference(set(("weight", "delay", "synapse_model")))
+            for k in set(processed_syn_spec.keys()).difference(
+                set(("weight", "delay", "synapse_model"))
+            )
         }
 
         if len(reduced_processed_syn_spec) > 0:
-            syn_param_keys = numpy.array(list(reduced_processed_syn_spec.keys()), dtype=numpy.string_)
+            syn_param_keys = numpy.array(
+                list(reduced_processed_syn_spec.keys()), dtype=numpy.string_
+            )
             syn_param_values = numpy.zeros([len(reduced_processed_syn_spec), len(pre)])
 
             for i, value in enumerate(reduced_processed_syn_spec.values()):
@@ -250,7 +268,9 @@
             syn_param_keys = None
             syn_param_values = None
 
-        nestkernel.ll_api_connect_arrays(pre, post, weights, delays, synapse_model, syn_param_keys, syn_param_values)
+        connect_arrays(
+            pre, post, weights, delays, synapse_model, syn_param_keys, syn_param_values
+        )
 
         return
 
     if not isinstance(pre, NodeCollection):
@@ -267,17 +287,21 @@
         raise TypeError("Presynaptic NodeCollection must have spatial information")
 
     # Create the projection dictionary
-    spatial_projections = _process_spatial_projections(processed_conn_spec, processed_syn_spec)
+    spatial_projections = _process_spatial_projections(
+        processed_conn_spec, processed_syn_spec
+    )
         _connect_spatial(pre._datum, post._datum, spatial_projections)
     else:
-        nestkernel.llapi_connect(pre._datum, post._datum, processed_conn_spec, processed_syn_spec)
+        nestkernel.llapi_connect(
+            pre._datum, post._datum, processed_conn_spec, processed_syn_spec
+        )
 
     if return_synapsecollection:
         return GetConnections(pre, post)
 
 
 def Disconnect(*args, conn_spec=None, syn_spec=None):
     """Disconnect connections in a SynapseCollection, or `pre` neurons from `post` neurons.
 
     When specifying `pre` and `post` nodes, they are disconnected using the specified disconnection rule
     (one-to-one by default) and synapse type (:cpp:class:`static_synapse ` by default).
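For orientation, the two calling conventions described here look as follows in
use (a minimal sketch; `pre` and `post` are assumed to be already created and
connected NodeCollections):

::

    # Variant 1: disconnect an explicit SynapseCollection
    conns = nest.GetConnections(pre, post)
    nest.Disconnect(conns)

    # Variant 2: disconnect pre from post by rule and synapse model
    nest.Disconnect(pre, post, conn_spec="one_to_one", syn_spec="static_synapse")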
@@ -286,7 +310,7 @@ def Disconnect(*args, conn_spec=None, syn_spec=None): Parameters ---------- args : SynapseCollection or NodeCollections - Either a collection of connections to disconnect, or pre- and postsynaptic nodes given as NodeCollections + Either a collection of connections to disconnect, or pre- and postsynaptic nodes given as `NodeCollection`s conn_spec : str or dict Disconnection rule when specifying pre- and postsynaptic nodes, see below syn_spec : str or dict @@ -300,9 +324,9 @@ def Disconnect(*args, conn_spec=None, syn_spec=None): Apply the same rules as for connectivity specs in the :py:func:`.Connect` method Possible choices of the conn_spec are - - - 'one_to_one' - - 'all_to_all' + :: + - 'one_to_one' + - 'all_to_all' **syn_spec** @@ -340,21 +364,33 @@ def Disconnect(*args, conn_spec=None, syn_spec=None): if len(args) == 1: synapsecollection = args[0] if not isinstance(synapsecollection, SynapseCollection): - raise TypeError("Arguments must be either a SynapseCollection or two NodeCollections") + raise TypeError( + "Arguments must be either a SynapseCollection or two NodeCollections" + ) if conn_spec is not None or syn_spec is not None: - raise ValueError("When disconnecting with a SynapseCollection, conn_spec and syn_spec cannot be specified") + raise ValueError( + "When disconnecting with a SynapseCollection, conn_spec and syn_spec cannot be specified" + ) synapsecollection.disconnect() elif len(args) == 2: # Fill default values conn_spec = "one_to_one" if conn_spec is None else conn_spec syn_spec = "static_synapse" if syn_spec is None else syn_spec - if isinstance(conn_spec, str): + if is_string(conn_spec): conn_spec = {"rule": conn_spec} - if isinstance(syn_spec, str): + if is_string(syn_spec): syn_spec = {"synapse_model": syn_spec} pre, post = args if not isinstance(pre, NodeCollection) or not isinstance(post, NodeCollection): - raise TypeError("Arguments must be either a SynapseCollection or two NodeCollections") - nestkernel.llapi_disconnect(pre._datum, post._datum, conn_spec, syn_spec) + raise TypeError( + "Arguments must be either a SynapseCollection or two NodeCollections" + ) + sps(pre) + sps(post) + sps(conn_spec) + sps(syn_spec) + sr("Disconnect_g_g_D_D") else: - raise TypeError("Arguments must be either a SynapseCollection or two NodeCollections") + raise TypeError( + "Arguments must be either a SynapseCollection or two NodeCollections" + ) diff --git a/pynest/nest/lib/hl_api_exceptions.py b/pynest/nest/lib/hl_api_exceptions.py index 0dbfefa1d1..cadd5797f9 100644 --- a/pynest/nest/lib/hl_api_exceptions.py +++ b/pynest/nest/lib/hl_api_exceptions.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# hl_api_exceptions.py +# _hl_api_exceptions.py # # This file is part of NEST. # @@ -27,7 +27,7 @@ class NESTMappedException(type): below gets called, creating a class with that name (the error name) and with an __init__ taking commandname and errormessage (as created in the source) which is a closure on the parent and errorname as well, with a parent of default type (self.default_parent) or - self.parents[errorname] if defined.""" + self.parents[errorname] if defined. 
""" def __getattr__(cls, errorname): """Creates a class of type "errorname" which is a child of cls.default_parent or @@ -35,7 +35,7 @@ def __getattr__(cls, errorname): This __getattr__ function also stores the class permanently as an attribute of cls for re-use where cls is actually the class that triggered the getattr (the class that - NESTMappedException is a metaclass of).""" + NESTMappedException is a metaclass of). """ # Dynamic class construction, first check if we know its parent if errorname in cls.parents: @@ -47,18 +47,17 @@ def __getattr__(cls, errorname): # not NESTMappedException, since that would mean the metaclass would let the new class inherit # this __getattr__, allowing unintended dynamic construction of attributes newclass = type( - cls.__name__ + "." + errorname, + cls.__name__ + '.' + errorname, (parent,), { - "__init__": cls.init(parent, errorname), - "__doc__": """Dynamically created exception {} from {}. + '__init__': cls.init(parent, errorname), + '__doc__': + """Dynamically created exception {} from {}. Created for the namespace: {}. Parent exception: {}. - """.format( - errorname, cls.source, cls.__name__, parent.__name__ - ), - }, + """.format(errorname, cls.source, cls.__name__, parent.__name__) + } ) # Cache for reuse: __getattr__ should now not get called if requested again @@ -75,7 +74,8 @@ class NESTErrors(metaclass=NESTMappedException): """ class NESTError(Exception): - """Base exception class for all NEST exceptions.""" + """Base exception class for all NEST exceptions. + """ def __init__(self, message): """Initializer for NESTError base class. @@ -90,9 +90,10 @@ def __init__(self, message): self.message = message class SLIException(NESTError): - """Base class for all exceptions coming from sli.""" + """Base class for all exceptions coming from sli. + """ - def __init__(self, commandname, errormessage, errorname="SLIException"): + def __init__(self, commandname, errormessage, errorname='SLIException'): """Initialize function. Parameters: @@ -109,13 +110,13 @@ def __init__(self, commandname, errormessage, errorname="SLIException"): self.errormessage = errormessage class PyNESTError(NESTError): - """Exceptions produced from Python/Cython code.""" - + """Exceptions produced from Python/Cython code. + """ pass @staticmethod def init(parent, errorname): - """Static class method to construct init's for SLIException children. + """ Static class method to construct init's for SLIException children. Construct our new init with closure on errorname (as a default value) and parent. The default value allows the __init__ to be chained and set by the leaf child. @@ -135,7 +136,8 @@ def __init__(self, commandname, errormessage, errorname=errorname, *args, **kwar # recursively init the parent class: all of this is only needed to properly set errorname parent.__init__(self, commandname, errormessage, *args, errorname=errorname, **kwargs) - docstring = """Initialization function. + docstring = \ + """Initialization function. Parameters: ----------- @@ -146,9 +148,7 @@ def __init__(self, commandname, errormessage, errorname=errorname, *args, **kwar *args, **kwargs: passed through to base class. self will be a descendant of {}. 
- """.format( - errorname, parent.__name__ - ) + """.format(errorname, parent.__name__) try: __init__.__doc__ = docstring @@ -167,52 +167,52 @@ def __init__(self, commandname, errormessage, errorname=errorname, *args, **kwar source = "SLI" default_parent = SLIException parents = { - "TypeMismatch": "InterpreterError", - "SystemSignal": "InterpreterError", - "RangeCheck": "InterpreterError", - "ArgumentType": "InterpreterError", - "BadParameterValue": "SLIException", - "DictError": "InterpreterError", - "UndefinedName": "DictError", - "EntryTypeMismatch": "DictError", - "StackUnderflow": "InterpreterError", - "IOError": "SLIException", - "UnaccessedDictionaryEntry": "DictError", - "UnknownModelName": "KernelException", - "NewModelNameExists": "KernelException", - "ModelInUse": "KernelException", - "UnknownSynapseType": "KernelException", - "UnknownNode": "KernelException", - "NoThreadSiblingsAvailable": "KernelException", - "LocalNodeExpected": "KernelException", - "NodeWithProxiesExpected": "KernelException", - "UnknownReceptorType": "KernelException", - "IncompatibleReceptorType": "KernelException", - "UnknownPort": "KernelException", - "IllegalConnection": "KernelException", - "InexistentConnection": "KernelException", - "UnknownThread": "KernelException", - "BadDelay": "KernelException", - "UnexpectedEvent": "KernelException", - "UnsupportedEvent": "KernelException", - "BadProperty": "KernelException", - "BadParameter": "KernelException", - "DimensionMismatch": "KernelException", - "DistributionError": "KernelException", - "InvalidDefaultResolution": "KernelException", - "InvalidTimeInModel": "KernelException", - "StepMultipleRequired": "KernelException", - "TimeMultipleRequired": "KernelException", - "GSLSolverFailure": "KernelException", - "NumericalInstability": "KernelException", - "KeyError": "KernelException", - "MUSICPortUnconnected": "KernelException", - "MUSICPortHasNoWidth": "KernelException", - "MUSICPortAlreadyPublished": "KernelException", - "MUSICSimulationHasRun": "KernelException", - "MUSICChannelUnknown": "KernelException", - "MUSICPortUnknown": "KernelException", - "MUSICChannelAlreadyMapped": "KernelException", + 'TypeMismatch': 'InterpreterError', + 'SystemSignal': 'InterpreterError', + 'RangeCheck': 'InterpreterError', + 'ArgumentType': 'InterpreterError', + 'BadParameterValue': 'SLIException', + 'DictError': 'InterpreterError', + 'UndefinedName': 'DictError', + 'EntryTypeMismatch': 'DictError', + 'StackUnderflow': 'InterpreterError', + 'IOError': 'SLIException', + 'UnaccessedDictionaryEntry': 'DictError', + 'UnknownModelName': 'KernelException', + 'NewModelNameExists': 'KernelException', + 'ModelInUse': 'KernelException', + 'UnknownSynapseType': 'KernelException', + 'UnknownNode': 'KernelException', + 'NoThreadSiblingsAvailable': 'KernelException', + 'LocalNodeExpected': 'KernelException', + 'NodeWithProxiesExpected': 'KernelException', + 'UnknownReceptorType': 'KernelException', + 'IncompatibleReceptorType': 'KernelException', + 'UnknownPort': 'KernelException', + 'IllegalConnection': 'KernelException', + 'InexistentConnection': 'KernelException', + 'UnknownThread': 'KernelException', + 'BadDelay': 'KernelException', + 'UnexpectedEvent': 'KernelException', + 'UnsupportedEvent': 'KernelException', + 'BadProperty': 'KernelException', + 'BadParameter': 'KernelException', + 'DimensionMismatch': 'KernelException', + 'DistributionError': 'KernelException', + 'InvalidDefaultResolution': 'KernelException', + 'InvalidTimeInModel': 'KernelException', + 
'StepMultipleRequired': 'KernelException', + 'TimeMultipleRequired': 'KernelException', + 'GSLSolverFailure': 'KernelException', + 'NumericalInstability': 'KernelException', + 'KeyError': 'KernelException', + 'MUSICPortUnconnected': 'KernelException', + 'MUSICPortHasNoWidth': 'KernelException', + 'MUSICPortAlreadyPublished': 'KernelException', + 'MUSICSimulationHasRun': 'KernelException', + 'MUSICChannelUnknown': 'KernelException', + 'MUSICPortUnknown': 'KernelException', + 'MUSICChannelAlreadyMapped': 'KernelException' } diff --git a/pynest/nest/lib/hl_api_helper.py b/pynest/nest/lib/hl_api_helper.py index 3034b5c8ba..425bbe6b38 100644 --- a/pynest/nest/lib/hl_api_helper.py +++ b/pynest/nest/lib/hl_api_helper.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# hl_api_helper.py +# _hl_api_helper.py # # This file is part of NEST. # @@ -24,45 +24,59 @@ API of the PyNEST wrapper. """ -import functools +import warnings import json +import functools +import textwrap +import subprocess import os +import re +import shlex +import sys +import numpy import pydoc -import textwrap -import warnings -from string import Template -import nest +from string import Template +from .. import pynestkernel as kernel from .. import nestkernel_api as nestkernel +import nest __all__ = [ - "broadcast", - "deprecated", - "get_parameters", - "get_parameters_hierarchical_addressing", - "get_wrapped_text", - "is_iterable", - "load_help", - "model_deprecation_warning", - "restructure_data", - "show_deprecation_warning", - "show_help_with_pager", - "stringify_path", # TODO PYNEST-NG: remove? - "SuppressedDeprecationWarning", + 'broadcast', + 'deprecated', + 'get_parameters', + 'get_parameters_hierarchical_addressing', + 'get_wrapped_text', + 'is_coercible_to_sli_array', + 'is_iterable', + 'is_sequence_of_connections', + 'is_sequence_of_node_ids', + 'load_help', + 'model_deprecation_warning', + 'restructure_data', + 'show_deprecation_warning', + 'show_help_with_pager', + 'SuppressedDeprecationWarning', + 'uni_str', ] # These flags are used to print deprecation warnings only once. # Only flags for special cases need to be entered here, such as special models # or function parameters, all flags for deprecated functions will be registered # by the @deprecated decorator, and therefore does not manually need to be placed here. -_deprecation_warning = {"deprecated_model": {"deprecation_issued": False, "replacement": "replacement_mod"}} +_deprecation_warning = {'deprecated_model': {'deprecation_issued': False, + 'replacement': 'replacement_mod'}, + 'iaf_psc_alpha_canon': {'deprecation_issued': False, + 'replacement': 'iaf_psc_alpha_ps'}, + 'pp_pop_psc_delta': {'deprecation_issued': False, + 'replacement': 'gif_pop_psc_exp'}} def format_Warning(message, category, filename, lineno, line=None): """Formats deprecation warning.""" - return "%s:%s: %s:%s\n" % (filename, lineno, category.__name__, message) + return '%s:%s: %s:%s\n' % (filename, lineno, category.__name__, message) warnings.formatwarning = format_Warning @@ -101,15 +115,14 @@ def show_deprecation_warning(func_name, alt_func_name=None, text=None): Text to display instead of standard text """ if func_name in _deprecation_warning: - if not _deprecation_warning[func_name]["deprecation_issued"]: + if not _deprecation_warning[func_name]['deprecation_issued']: if text is None: - text = ( - "{0} is deprecated and will be removed in a future version of NEST.\n" "Please use {1} instead!" 
- ).format(func_name, alt_func_name) + text = ("{0} is deprecated and will be removed in a future version of NEST.\n" + "Please use {1} instead!").format(func_name, alt_func_name) text = get_wrapped_text(text) - warnings.warn("\n" + text) # add LF so text starts on new line - _deprecation_warning[func_name]["deprecation_issued"] = True + warnings.warn('\n' + text) # add LF so text starts on new line + _deprecation_warning[func_name]['deprecation_issued'] = True # Since we need to pass extra arguments to the decorator, we need a @@ -133,45 +146,64 @@ def deprecated(alt_func_name, text=None): """ def deprecated_decorator(func): - _deprecation_warning[func.__name__] = {"deprecation_issued": False} + _deprecation_warning[func.__name__] = {'deprecation_issued': False} @functools.wraps(func) def new_func(*args, **kwargs): show_deprecation_warning(func.__name__, alt_func_name, text=text) return func(*args, **kwargs) - return new_func return deprecated_decorator -def stringify_path(filepath): +def is_iterable(seq): + """Return True if the given object is an iterable, False otherwise. + + Parameters + ---------- + seq : object + Object to check + + Returns + ------- + bool: + True if object is an iterable """ - Convert path-like object to string form. - Attempt to convert path-like object to a string by coercing objects - supporting the fspath protocol to its ``__fspath__`` method. Anything that - is not path-like, which includes bytes and strings, is passed through - unchanged. + try: + iter(seq) + except TypeError: + return False + + return True + + +def is_coercible_to_sli_array(seq): + """Checks whether a given object is coercible to a SLI array Parameters ---------- - filepath : object - Object representing file system path. + seq : object + Object to check Returns ------- - filepath : str - Stringified filepath. + bool: + True if object is coercible to a SLI array """ - if isinstance(filepath, os.PathLike): - filepath = filepath.__fspath__() # should return str or bytes object - return filepath + import sys + if sys.version_info[0] >= 3: + return isinstance(seq, (tuple, list, range)) + else: + return isinstance(seq, (tuple, list, xrange)) -def is_iterable(seq): - """Return True if the given object is an iterable, False otherwise. + +def is_sequence_of_connections(seq): + """Checks whether low-level API accepts seq as a sequence of + connections. Parameters ---------- @@ -181,15 +213,35 @@ def is_iterable(seq): Returns ------- bool: - True if object is an iterable + True if object is an iterable of dictionaries or + subscriptables of CONN_LEN """ try: - iter(seq) + cnn = next(iter(seq)) + return isinstance(cnn, dict) or len(cnn) == kernel.CONN_LEN except TypeError: - return False + pass - return True + return False + + +def is_sequence_of_node_ids(seq): + """Checks whether the argument is a potentially valid sequence of + node IDs (non-negative integers). 
+ + Parameters + ---------- + seq : object + Object to check + + Returns + ------- + bool: + True if object is a potentially valid sequence of node IDs + """ + + return all(isinstance(n, int) and n >= 0 for n in seq) def broadcast(item, length, allowed_types, name="item"): @@ -219,13 +271,12 @@ def broadcast(item, length, allowed_types, name="item"): """ if isinstance(item, allowed_types): - return length * (item,) + return length * (item, ) elif len(item) == 1: return length * item elif len(item) != length: raise TypeError( - "'{0}' must be a single value, a list with one element or a list with {1} elements.".format(name, length) - ) + "'{0}' must be a single value, a list with one element or a list with {1} elements.".format(name, length)) return item @@ -241,12 +292,9 @@ def __show_help_in_modal_window(obj, help_text): """ help_text = json.dumps(help_text) - style = ( - "" - ) - s = Template( - """ + style = "" + s = Template(""" require( ["base/js/dialog"], function(dialog) { @@ -259,11 +307,9 @@ def __show_help_in_modal_window(obj, help_text): }); } ); - """ - ) + """) from IPython.display import HTML, Javascript, display - display(HTML(style)) display(Javascript(s.substitute(jstitle=obj, jstext=help_text))) @@ -284,8 +330,8 @@ def get_help_fname(obj): File name of the help text for obj """ - docdir = nestkernel.ll_api_get_kernel_status()["docdir"] - help_fname = os.path.join(docdir, "html", "models", f"{obj}.rst") + docdir = sli_func("statusdict/prgdocdir ::") + help_fname = os.path.join(docdir, 'html', 'models', f'{obj}.rst') if os.path.isfile(help_fname): return help_fname @@ -308,7 +354,7 @@ def load_help(obj): """ help_fname = get_help_fname(obj) - with open(help_fname, "r", encoding="utf-8") as help_file: + with open(help_fname, 'r', encoding='utf-8') as help_file: help_text = help_file.read() return help_text @@ -328,14 +374,14 @@ def show_help_with_pager(obj): def check_nb(): try: - return get_ipython().__class__.__name__.startswith("ZMQ") + return get_ipython().__class__.__name__.startswith('ZMQ') except NameError: return False help_text = load_help(obj) if check_nb(): - __show_help_in_modal_window(obj + ".rst", help_text) + __show_help_in_modal_window(obj + '.rst', help_text) return pydoc.pager(help_text) @@ -359,10 +405,9 @@ def model_deprecation_warning(model): """ if model in _deprecation_warning: - if not _deprecation_warning[model]["deprecation_issued"]: - text = ( - "The {0} model is deprecated and will be removed in a future version of NEST, " "use {1} instead." 
-            ).format(model, _deprecation_warning[model]["replacement"])
+        if not _deprecation_warning[model]['deprecation_issued']:
+            text = ("The {0} model is deprecated and will be removed in a future version of NEST, "
+                    "use {1} instead.").format(model, _deprecation_warning[model]['replacement'])
             show_deprecation_warning(model, text=text)
 
 
@@ -392,15 +437,15 @@
             final_result.append(result_dict[keys])
         elif keys in all_keys:
             final_result.append(None)
+        final_result = tuple(final_result)
     else:
         final_result = result[0][keys]
 
     elif is_iterable(keys):
-        final_result = (
-            {key: [val[i] for val in result] for i, key in enumerate(keys)}
-            if len(result) != 1
-            else {key: val[i] for val in result for i, key in enumerate(keys)}
-        )
+        final_result = ({key: [val[i] for val in result]
+                         for i, key in enumerate(keys)} if len(result) != 1
+                        else {key: val[i] for val in result
+                              for i, key in enumerate(keys)})
 
     elif keys is None:
         if len(result) != 1:
@@ -479,10 +524,10 @@ def get_parameters_hierarchical_addressing(nc, params):
     # or list of strings.
     if isinstance(params[0], str):
         value_list = nc.get(params[0])
-        if not isinstance(value_list, tuple):
+        if type(value_list) != tuple:
             value_list = (value_list,)
     else:
-        raise TypeError("First argument must be a string, specifying path into hierarchical dictionary")
+        raise TypeError('First argument must be a string, specifying path into hierarchical dictionary')
 
     result = restructure_data(value_list, None)
 
@@ -511,22 +556,27 @@ def __init__(self, no_dep_funcs):
             for which to suppress deprecation warnings
         """
 
-        self._no_dep_funcs = no_dep_funcs if not isinstance(no_dep_funcs, str) else (no_dep_funcs,)
+        self._no_dep_funcs = (no_dep_funcs if not isinstance(no_dep_funcs, str)
+                              else (no_dep_funcs, ))
        self._deprecation_status = {}
-        self._verbosity_level = nestkernel.get_verbosity()
+        sr('verbosity')  # Use sli-version as we cannot import from info because of circular inclusion problem
+        self._verbosity_level = spp()
 
     def __enter__(self):
+
         for func_name in self._no_dep_funcs:
             self._deprecation_status[func_name] = _deprecation_warning[func_name]  # noqa
-            _deprecation_warning[func_name]["deprecation_issued"] = True
+            _deprecation_warning[func_name]['deprecation_issued'] = True
 
             # Suppress only if verbosity level is deprecated or lower
-            if self._verbosity_level <= nestkernel.severity_t.M_DEPRECATED:
-                nestkernel.set_verbosity(nestkernel.severity_t.M_WARNING)
+            if self._verbosity_level <= sli_func('M_DEPRECATED'):
+                # Use sli-version as we cannot import from info because of circular inclusion problem
+                sr("{} setverbosity".format(sli_func('M_WARNING')))
 
     def __exit__(self, *args):
+
         # Reset the verbosity level and deprecation warning status
-        nestkernel.set_verbosity(self._verbosity_level)
+        sr("{} setverbosity".format((self._verbosity_level)))
 
         for func_name, deprec_dict in self._deprecation_status.items():
-            _deprecation_warning[func_name]["deprecation_issued"] = deprec_dict["deprecation_issued"]
+            _deprecation_warning[func_name]['deprecation_issued'] = (
+                deprec_dict['deprecation_issued'])
diff --git a/pynest/nest/lib/hl_api_info.py b/pynest/nest/lib/hl_api_info.py
index 179f85768d..3b9a0c8d1d 100644
--- a/pynest/nest/lib/hl_api_info.py
+++ b/pynest/nest/lib/hl_api_info.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 #
-# hl_api_info.py
+# _hl_api_info.py
 #
 # This file is part of NEST.
 #
@@ -23,25 +23,42 @@
 Functions to get information on NEST.
""" +import sys import os import textwrap import webbrowser -import nest - +from ._hl_api_helper import broadcast, is_iterable, load_help, show_help_with_pager +from ._hl_api_types import to_json from .. import nestkernel_api as nestkernel -from .hl_api_helper import broadcast, is_iterable, load_help, show_help_with_pager -from .hl_api_types import to_json +import nest __all__ = [ - "get_verbosity", - "help", - "helpdesk", - "set_verbosity", - "verbosity", + 'authors', + 'get_argv', + 'get_verbosity', + 'help', + 'helpdesk', + 'message', + 'set_verbosity', + 'sysinfo', ] -verbosity = nestkernel.severity_t + +def sysinfo(): + """Print information on the platform on which NEST was compiled. + + """ + + sr("sysinfo") + + +def authors(): + """Print the authors of NEST. + + """ + + sr("authors") def helpdesk(): @@ -56,8 +73,8 @@ def helpdesk(): """ - docdir = nestkernel.ll_api_get_kernel_status()["docdir"] - help_fname = os.path.join(docdir, "html", "index.html") + docdir = sli_func("statusdict/prgdocdir ::") + help_fname = os.path.join(docdir, 'html', 'index.html') if not os.path.isfile(help_fname): msg = "Sorry, the help index cannot be opened. " @@ -98,17 +115,51 @@ def help(obj=None, return_text=False): else: show_help_with_pager(obj) except FileNotFoundError: - print( - textwrap.dedent( - f""" + print(textwrap.dedent(f""" Sorry, there is no help for model '{obj}'. - Use the Python help() function to obtain help on PyNEST functions.""" - ) - ) + Use the Python help() function to obtain help on PyNEST functions.""")) else: print(nest.__doc__) +def get_argv(): + """Return argv as seen by NEST. + + This is similar to Python :code:`sys.argv` but might have changed after + MPI initialization. + + Returns + ------- + tuple + Argv, as seen by NEST + + """ + + sr('statusdict') + statusdict = spp() + return statusdict['argv'] + + +def message(level, sender, text): + """Print a message using message system of NEST. + + Parameters + ---------- + level : + Level + sender : + Message sender + text : str + Text to be sent in the message + + """ + + sps(level) + sps(sender) + sps(text) + sr('message') + + def get_verbosity(): """Return verbosity level of NEST's messages. @@ -121,11 +172,12 @@ def get_verbosity(): Returns ------- - severity_t: + int: The current verbosity level """ - return nestkernel.llapi_get_verbosity() + sr('verbosity') + return spp() def set_verbosity(level): @@ -146,11 +198,14 @@ def set_verbosity(level): Parameters ---------- - level : severity_t, default: 'M_ALL' - Can be one of the values of the nest.verbosity enum. + level : str, default: 'M_INFO' + Can be one of 'M_FATAL', 'M_ERROR', 'M_WARNING', 'M_DEPRECATED', + 'M_INFO' or 'M_ALL'. """ - if type(level) is not verbosity: - raise TypeError('"level" must be a value of the nest.verbosity enum.') - - nestkernel.llapi_set_verbosity(level) + # TODO-PYNEST-NG: There are no SLI messages anymore, so verbosity + # is now irrelevant and should be replaced when a + # replacement for message() exists. + + # sr("{} setverbosity".format(level)) + pass diff --git a/pynest/nest/lib/hl_api_models.py b/pynest/nest/lib/hl_api_models.py index ce3be14357..468bc7c7dc 100644 --- a/pynest/nest/lib/hl_api_models.py +++ b/pynest/nest/lib/hl_api_models.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# hl_api_models.py +# _hl_api_models.py # # This file is part of NEST. # @@ -23,10 +23,10 @@ Functions for model handling """ +from .._ll_api import * from .. 
import nestkernel_api as nestkernel -from ..ll_api import * -from .hl_api_helper import deprecated, is_iterable, model_deprecation_warning -from .hl_api_types import to_json +from ._hl_api_helper import deprecated, is_iterable, model_deprecation_warning +from ._hl_api_types import to_json __all__ = [ "ConnectionRules", @@ -38,6 +38,7 @@ @deprecated("nest.node_models or nest.synapse_models") +@check_stack def Models(mtype="all", sel=None): """Return a tuple of neuron, device, or synapse model names. @@ -89,6 +90,7 @@ def Models(mtype="all", sel=None): @deprecated("nest.connection_rules") +@check_stack def ConnectionRules(): """Return a tuple of all available connection rules, sorted by name. @@ -102,6 +104,7 @@ def ConnectionRules(): return tuple(sorted(GetKernelStatus("connection_rules"))) +@check_stack def SetDefaults(model, params, val=None): """Set defaults for the given model or recording backend. @@ -126,6 +129,7 @@ def SetDefaults(model, params, val=None): nestkernel.llapi_set_defaults(model, params) +@check_stack def GetDefaults(model, keys=None, output=""): """Return defaults of the given model or recording backend. @@ -173,6 +177,7 @@ def GetDefaults(model, keys=None, output=""): return result +@check_stack def CopyModel(existing, new, params=None): """Create a new model by copying an existing one. diff --git a/pynest/nest/lib/hl_api_nodes.py b/pynest/nest/lib/hl_api_nodes.py index 674d4aaffc..8da5ed0bce 100644 --- a/pynest/nest/lib/hl_api_nodes.py +++ b/pynest/nest/lib/hl_api_nodes.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# hl_api_nodes.py +# _hl_api_nodes.py # # This file is part of NEST. # @@ -26,13 +26,11 @@ import warnings import nest - +from .._ll_api import * +from .. import pynestkernel as kernel from .. import nestkernel_api as nestkernel -from ..ll_api import * -from .hl_api_exceptions import NESTErrors -from .hl_api_helper import is_iterable, model_deprecation_warning -from .hl_api_parallel_computing import NumProcesses, Rank -from .hl_api_types import NodeCollection, Parameter +from ._hl_api_helper import is_iterable, model_deprecation_warning +from ._hl_api_types import NodeCollection, Parameter __all__ = [ "Create", @@ -65,14 +63,13 @@ def Create(model, n=1, params=None, positions=None): params : dict or list, optional Parameters for the new nodes. Can be any of the following: - - A dictionary with either single values or lists of size n. - The single values will be applied to all nodes, while the lists will be distributed across - the nodes. Both single values and lists can be given at the same time. - - A list with n dictionaries, one dictionary for each node. - + - A dictionary with either single values or lists of size n. + The single values will be applied to all nodes, while the lists will be distributed across + the nodes. Both single values and lists can be given at the same time. + - A list with n dictionaries, one dictionary for each node. Values may be :py:class:`.Parameter` objects. If omitted, the model's defaults are used. - positions: :py:class:`.grid` or :py:class:`.free` object, optional + positions: :py:class:`.spatial.grid` or :py:class:`.spatial.free` object, optional Object describing spatial positions of the nodes. If omitted, the nodes have no spatial attachment. Returns @@ -101,15 +98,9 @@ def Create(model, n=1, params=None, positions=None): # PYNEST-NG: can we support the usecase above by passing the dict into ll_create? 
if isinstance(params, dict) and params: # if params is a dict and not empty - iterable_or_parameter_in_params = any(is_iterable(v) or isinstance(v, Parameter) for k, v in params.items()) - - if isinstance(params, (list, tuple)) and len(params) != n: - raise TypeError("list of params must have one dictionary per node") - - if params is not None and not ( - isinstance(params, dict) or (isinstance(params, (list, tuple)) and all(isinstance(e, dict) for e in params)) - ): - raise TypeError("params must be either a dict of parameters or a list or tuple of dicts") + iterable_or_parameter_in_params = any( + is_iterable(v) or isinstance(v, Parameter) for k, v in params.items() + ) if positions is not None: # Explicitly retrieve lazy loaded spatial property from the module class. @@ -117,7 +108,9 @@ def Create(model, n=1, params=None, positions=None): spatial = getattr(nest.NestModule, "spatial") # We only accept positions as either a free object or a grid object. if not isinstance(positions, (spatial.free, spatial.grid)): - raise TypeError("`positions` must be either a nest.spatial.free or a nest.spatial.grid object") + raise TypeError( + "`positions` must be either a nest.spatial.free or a nest.spatial.grid object" + ) layer_specs = {"elements": model} layer_specs["edge_wrap"] = positions.edge_wrap if isinstance(positions, spatial.free): @@ -128,12 +121,14 @@ def Create(model, n=1, params=None, positions=None): else: # If positions is not a free object, it must be a grid object. if n > 1: - raise NESTErrors.NESTError("Cannot specify number of nodes with grid positions") + raise kernel.NESTError( + "Cannot specify number of nodes with grid positions" + ) layer_specs["shape"] = positions.shape if positions.center is not None: - layer_specs["center"] = [float(v) for v in positions.center] + layer_specs["center"] = positions.center if positions.extent is not None: - layer_specs["extent"] = [float(v) for v in positions.extent] + layer_specs["extent"] = positions.extent layer = nestkernel.llapi_create_spatial(layer_specs) layer.set(params if params else {}) @@ -141,8 +136,7 @@ def Create(model, n=1, params=None, positions=None): node_ids = nestkernel.llapi_create(model, n) - if (isinstance(params, dict) and params) or isinstance(params, (list, tuple)): - # if params is a dict and not empty or a list of dicts + if isinstance(params, dict) and params: # if params is a dict and not empty try: node_ids.set(params) except Exception: @@ -205,7 +199,9 @@ def GetLocalNodeCollection(nc): Object representing the local nodes of the given `NodeCollection` """ if not isinstance(nc, NodeCollection): - raise TypeError("GetLocalNodeCollection requires a NodeCollection in order to run") + raise TypeError( + "GetLocalNodeCollection requires a NodeCollection in order to run" + ) rank = Rank() num_procs = NumProcesses() diff --git a/pynest/nest/lib/hl_api_parallel_computing.py b/pynest/nest/lib/hl_api_parallel_computing.py index 44d489043a..608268292c 100644 --- a/pynest/nest/lib/hl_api_parallel_computing.py +++ b/pynest/nest/lib/hl_api_parallel_computing.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# hl_api_parallel_computing.py +# _hl_api_parallel_computing.py # # This file is part of NEST. # @@ -23,8 +23,9 @@ Functions for parallel computing """ +from .._ll_api import * +from .. import pynestkernel as kernel from .. import nestkernel_api as nestkernel -from ..ll_api import * __all__ = [ "NumProcesses", @@ -36,6 +37,7 @@ ] +@check_stack def Rank(): """Return the MPI rank of the local process. 
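For orientation, Rank() and NumProcesses() combine as follows (a minimal
sketch; assumes NEST was started under MPI, e.g. via mpirun, so the printed
rank differs per process):

::

    import nest

    print("I am rank {} of {}".format(nest.Rank(), nest.NumProcesses()))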
@@ -55,9 +57,10 @@ def Rank(): may complete but generate nonsensical results. """ - return nestkernel.llapi_get_kernel_status()["mpi_rank"] + return nestkernel.llapi_get_rank() +@check_stack def NumProcesses(): """Return the overall number of MPI processes. @@ -67,9 +70,10 @@ def NumProcesses(): Number of overall MPI processes """ - return nestkernel.llapi_get_kernel_status()["num_processes"] + return nestkernel.llapi_get_num_mpi_processes() +@check_stack def SetAcceptableLatency(port_name, latency): """Set the acceptable `latency` (in ms) for a MUSIC port. @@ -81,13 +85,12 @@ def SetAcceptableLatency(port_name, latency): Latency in ms """ - # PYNEST-NG - # sps(kernel.SLILiteral(port_name)) - # sps(latency) - # sr("SetAcceptableLatency") - pass + sps(kernel.SLILiteral(port_name)) + sps(latency) + sr("SetAcceptableLatency") +@check_stack def SetMaxBuffered(port_name, size): """Set the maximum buffer size for a MUSIC port. @@ -99,27 +102,24 @@ def SetMaxBuffered(port_name, size): Buffer size """ - # PYNEST-NG - # sps(kernel.SLILiteral(port_name)) - # sps(size) - # sr("SetMaxBuffered") - pass + sps(kernel.SLILiteral(port_name)) + sps(size) + sr("SetMaxBuffered") +@check_stack def SyncProcesses(): """Synchronize all MPI processes.""" - # PYNEST-NG - # sr("SyncProcesses") - pass + sr("SyncProcesses") +@check_stack def GetLocalVPs(): """Return iterable representing the VPs local to the MPI rank.""" # Compute local VPs as range based on round-robin logic in # VPManager::get_vp(). mpitest_get_local_vps ensures this is in # sync with the kernel. - - n_vp = nestkernel.llapi_get_kernel_status()["total_num_virtual_procs"] + n_vp = sli_func("GetKernelStatus /total_num_virtual_procs get") return range(Rank(), n_vp, NumProcesses()) diff --git a/pynest/nest/lib/hl_api_simulation.py b/pynest/nest/lib/hl_api_simulation.py index b50bc6e798..c75a4ce10e 100644 --- a/pynest/nest/lib/hl_api_simulation.py +++ b/pynest/nest/lib/hl_api_simulation.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# hl_api_simulation.py +# _hl_api_simulation.py # # This file is part of NEST. # @@ -23,13 +23,15 @@ Functions for simulation control """ -import warnings from contextlib import contextmanager +import warnings +from .. import pynestkernel as kernel from .. import nestkernel_api as nestkernel -from ..ll_api import * -from .hl_api_helper import is_iterable -from .hl_api_parallel_computing import Rank + +from .._ll_api import * +from ._hl_api_helper import is_iterable +from ._hl_api_parallel_computing import Rank __all__ = [ "Cleanup", @@ -46,11 +48,10 @@ ] +@check_stack def Simulate(t): """Simulate the network for `t` milliseconds. - `Simulate(t)` runs `Prepare()`, `Run(t)`, and `Cleanup()` in this order. - Parameters ---------- t : float @@ -58,13 +59,14 @@ def Simulate(t): See Also -------- - RunManager, Prepare, Run, Cleanup + RunManager """ nestkernel.llapi_simulate(t) +@check_stack def Run(t): """Simulate the network for `t` milliseconds. @@ -77,7 +79,9 @@ def Run(t): ------ Call between `Prepare` and `Cleanup` calls, or within a - ``with RunManager`` clause. `Run(t)` is called once by each call to `Simulate(t)`. + ``with RunManager`` clause. + + Simulate(t): t' = t/m; Prepare(); for _ in range(m): Run(t'); Cleanup() `Prepare` must be called before `Run` to calibrate the system, and `Cleanup` must be called after `Run` to close files, cleanup handles, and @@ -101,30 +105,28 @@ def Run(t): nestkernel.llapi_run(t) +@check_stack def Prepare(): - """Calibrate the system before a `Run` call. 
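The pseudocode in the ``Run`` docstring above can be written out directly as user code. A sketch, assuming ten 10 ms chunks in place of a single ``Simulate(100.0)``::

    import nest

    nest.ResetKernel()

    # Equivalent to nest.Simulate(100.0), but the network can be inspected
    # or modified between the chunks.
    nest.Prepare()
    for _ in range(10):
        nest.Run(10.0)
    nest.Cleanup()

    # The RunManager context manager wraps the same Prepare/Cleanup pair:
    with nest.RunManager():
        for _ in range(10):
            nest.Run(10.0)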
-
-    `Prepare` is automatically called by `Simulate` and `RunManager`.
+    """Calibrate the system before a `Run` call. Not needed for `Simulate`.
 
     See Also
     --------
-    Run, Cleanup, Simulate, RunManager
+    Run, Cleanup
 
     """
     nestkernel.llapi_prepare()
 
 
+@check_stack
 def Cleanup():
-    """Cleans up resources after a `Run` calls.
-
-    `Cleanup` is automatically called by `Simulate` and `RunManager`.
+    """Cleans up resources after a `Run` call. Not needed for `Simulate`.
 
     Closes state for a series of runs, such as flushing and closing files.
     A `Prepare` is needed after a `Cleanup` before any more calls to `Run`.
 
     See Also
     --------
-    Run, Prepare, Simulate, RunManager
+    Run, Prepare
 
     """
     nestkernel.llapi_cleanup()
 
@@ -167,6 +169,7 @@ def RunManager():
         Cleanup()
 
 
+@check_stack
 def ResetKernel():
     """Reset the simulation kernel.
 
@@ -178,12 +181,10 @@ def ResetKernel():
     * all network nodes
     * all connections
     * all user-defined neuron and synapse models
-
     are deleted, and
-
     * time
     * random generators
-
     are reset. The only exception is that dynamically loaded modules are not
     unloaded. This may change in a future version of NEST.
 
@@ -191,6 +192,7 @@
     nestkernel.llapi_reset_kernel()
 
 
+@check_stack
 def SetKernelStatus(params):
     """Set parameters for the simulation kernel.
 
@@ -215,6 +217,7 @@
     # the module level, but have to have it on the function level.
     import nest # noqa
 
+    # TODO-PYNEST-NG: Enable again when KernelAttribute works
     raise_errors = params.get("dict_miss_is_error", nest.dict_miss_is_error)
     valids = nest._kernel_attr_names
     readonly = nest._readonly_kernel_attrs
@@ -239,6 +242,7 @@
     nestkernel.llapi_set_kernel_status(params)
 
 
+@check_stack
 def GetKernelStatus(keys=None):
     """Obtain parameters of the simulation kernel.
 
@@ -286,6 +290,7 @@
         raise TypeError("keys should be either a string or an iterable")
 
 
+@check_stack
 def Install(module_name):
     """Load a dynamically linked NEST module.
 
@@ -315,6 +320,7 @@
     return sr("(%s) Install" % module_name)
 
 
+@check_stack
 def EnableStructuralPlasticity():
     """Enable structural plasticity for the network simulation
 
@@ -324,9 +330,10 @@
 
     """
 
-    nestkernel.llapi_enable_structural_plasticity()
+    sr("EnableStructuralPlasticity")
 
 
+@check_stack
 def DisableStructuralPlasticity():
     """Disable structural plasticity for the network simulation
 
@@ -335,4 +342,4 @@
     EnableStructuralPlasticity
 
     """
-    nestkernel.llapi_disable_structural_plasticity()
+    sr("DisableStructuralPlasticity")
diff --git a/pynest/nest/lib/hl_api_sonata.py b/pynest/nest/lib/hl_api_sonata.py
deleted file mode 100644
index f3757710a2..0000000000
--- a/pynest/nest/lib/hl_api_sonata.py
+++ /dev/null
@@ -1,666 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# hl_api_sonata.py
-#
-# This file is part of NEST.
-#
-# Copyright (C) 2004 The NEST Initiative
-#
-# NEST is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or
-# (at your option) any later version.
-#
-# NEST is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with NEST. If not, see <http://www.gnu.org/licenses/>.
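Stepping back to the ``SetKernelStatus``/``GetKernelStatus`` pair above, typical usage is symmetric: a dict goes in, and either a full status dict, a single value, or a tuple of values comes out. A sketch using standard kernel attributes (``resolution``, ``rng_seed``, ``time``)::

    import nest

    nest.ResetKernel()
    nest.SetKernelStatus({"resolution": 0.1, "rng_seed": 12345})

    print(nest.GetKernelStatus("resolution"))          # single key -> its value
    print(nest.GetKernelStatus(["rng_seed", "time"]))  # iterable -> tuple of values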
- -""" -Functions to build and simulate networks with the SONATA format -""" - - -import itertools -import json -import os -from pathlib import Path, PurePath - -import numpy as np - -from .. import nestkernel_api as nestkernel -from .hl_api_models import GetDefaults -from .hl_api_nodes import Create -from .hl_api_simulation import GetKernelStatus, SetKernelStatus, Simulate -from .hl_api_types import NodeCollection - -try: - import pandas as pd - - have_pandas = True -except ImportError: - have_pandas = False - -try: - import h5py - - have_h5py = True -except ImportError: - have_h5py = False - -have_hdf5 = GetKernelStatus("build_info")["have_hdf5"] - -__all__ = ["SonataNetwork"] - - -class SonataNetwork: - """Class for building and simulating networks represented by the SONATA format. - - ``SonataNetwork`` provides native NEST support for building and simulating - network models represented by the SONATA format. In the SONATA format, - information about nodes, edges (synapses) and their respective properties - are stored in the table-based file formats HDF5 and CSV. Model metadata, - such as the path relation between files on disk and simulation parameters, - are stored in JSON configuration files. See the :ref:`nest_sonata` for details - on the NEST support of the SONATA format. - - The constructor takes the JSON configuration file specifying the paths to - the HDF5 and CSV files describing the network. In case simulation - parameters are stored in a separate JSON configuration file, the - constructor also has the option to pass a second configuration file. - - Parameters - ---------- - config : [str | pathlib.Path | pathlib.PurePath] - String or pathlib object describing the path to the JSON - configuration file. - sim_config : [str | pathlib.Path | pathlib.PurePath], optional - String or pathlib object describing the path to a JSON configuration - file containing simulation parameters. This is only needed if simulation - parameters are given in a separate configuration file. - - Example - ------- - :: - - import nest - - nest.ResetKernel() - - # Instantiate SonataNetwork - sonata_net = nest.SonataNetwork("path/to/config.json") - - # Create and connect nodes - node_collections = sonata_net.BuildNetwork() - - # Connect spike recorder to a population - s_rec = nest.Create("spike_recorder") - nest.Connect(node_collections["name_of_population_to_record"], s_rec) - - # Simulate the network - sonata_net.Simulate() - """ - - def __init__(self, config, sim_config=None): - if not have_hdf5: - msg = "SonataNetwork unavailable because NEST was compiled without HDF5 support" - raise nestkernel.NESTError(msg) - if not have_h5py: - msg = "SonataNetwork unavailable because h5py is not installed or could not be imported" - raise nestkernel.NESTError(msg) - if not have_pandas: - msg = "SonataNetwork unavailable because pandas is not installed or could not be imported" - raise nestkernel.NESTError(msg) - - self._node_collections = {} - self._edges_maps = [] - self._hyperslab_size_default = 2**20 - - self._are_nodes_created = False - self._is_network_built = False - - self._conf = self._parse_config(config) - if sim_config is not None: - self._conf.update(self._parse_config(sim_config)) - - if self._conf["target_simulator"] != "NEST": - msg = "'target_simulator' in configuration file must be 'NEST'." 
- raise ValueError(msg) - - if "dt" not in self._conf["run"]: - msg = "Time resolution 'dt' must be specified in configuration file" - raise ValueError(msg) - - SetKernelStatus({"resolution": self._conf["run"]["dt"]}) - - def _parse_config(self, config): - """Parse JSON configuration file. - - Parse JSON config file and convert to a dictionary containing - absolute paths and simulation parameters. - - Parameters - ---------- - config : [str | pathlib.Path | pathlib.PurePath] - String or pathlib object describing the path to the JSON - configuration file. - - Returns - ------- - dict - SONATA config as dictionary - """ - - if not isinstance(config, (str, PurePath, Path)): - msg = "Path to JSON configuration file must be passed as str, pathlib.PurePath or pathlib.Path" - raise TypeError(msg) - - # Get absolute path - conf_path = Path(config).resolve(strict=True) - base_path = conf_path.parent - - with open(conf_path) as fp: - conf = json.load(fp) - - # Replace path variables (e.g. $MY_DIR) with absolute paths in manifest - for k, v in conf["manifest"].copy().items(): - if "$BASE_DIR" in v: - v = v.replace("$BASE_DIR", ".") - conf["manifest"].update({k: base_path.joinpath(v).as_posix()}) - - if k.startswith("$"): - conf["manifest"][k[1:]] = conf["manifest"].pop(k) - - def recursive_substitutions(config_obj): - # Recursive substitutions of path variables with entries from manifest - if isinstance(config_obj, dict): - return {k: recursive_substitutions(v) for k, v in config_obj.items()} - elif isinstance(config_obj, list): - return [recursive_substitutions(e) for e in config_obj] - elif isinstance(config_obj, str) and config_obj.startswith("$"): - for dir, path in conf["manifest"].items(): - config_obj = config_obj.replace(dir, path) - return config_obj[1:] - return config_obj - - conf.update(recursive_substitutions(conf)) - - return conf - - def Create(self): - """Create the SONATA network nodes. - - Creates the network nodes. In the SONATA format, node populations are - serialized in node HDF5 files. Each node in a population has a node - type. Each node population has a single associated node types CSV file - that assigns properties to all nodes with a given node type. - - Please note that it is assumed that all relevant node properties are - stored in the node types CSV file. For neuron nodes, the relevant - properties are model type, model template and reference to a JSON - file describing the parametrization. - - Returns - ------- - node_collections : dict - A dictionary containing the created :py:class:`.NodeCollection` - for each population. The population names are keys. - """ - - # Iterate node config files - for nodes_conf in self._conf["networks"]["nodes"]: - csv_fn = nodes_conf["node_types_file"] - nodes_df = pd.read_csv(csv_fn, sep=r"\s+") - - # Require only one model type per CSV file - model_types_arr = nodes_df["model_type"].to_numpy() - is_one_model_type = (model_types_arr[0] == model_types_arr).all() - - if not is_one_model_type: - msg = f"Only one model type per node types CSV file is supported. {csv_fn} contains more than one." - raise ValueError(msg) - - model_type = model_types_arr[0] - - if model_type in ["point_neuron", "point_process"]: - self._create_neurons(nodes_conf, nodes_df, csv_fn) - elif model_type == "virtual": - self._create_spike_train_injectors(nodes_conf) - else: - msg = f"Model type '{model_type}' in {csv_fn} is not supported by NEST." 
- raise ValueError(msg) - - self._are_nodes_created = True - - return self._node_collections - - def _create_neurons(self, nodes_conf, nodes_df, csv_fn): - """Create neuron nodes. - - Parameters - ---------- - nodes_conf : dict - Config as dictionary specifying filenames - nodes_df : pandas.DataFrame - Associated node CSV table as dataframe - csv_fn : str - Name of current CSV file. Used for more informative error messages. - """ - - node_types_map = self._create_node_type_parameter_map(nodes_df, csv_fn) - - models_arr = nodes_df["model_template"].to_numpy() - is_one_model = (models_arr[0] == models_arr).all() - one_model_name = models_arr[0] if is_one_model else None - - with h5py.File(nodes_conf["nodes_file"], "r") as nodes_h5f: - # Iterate node populations in current node.h5 file - for pop_name in nodes_h5f["nodes"]: - node_type_id_dset = nodes_h5f["nodes"][pop_name]["node_type_id"][:] - - if is_one_model: - nest_nodes = Create(one_model_name, n=node_type_id_dset.size) - node_type_ids, inv_ind = np.unique(node_type_id_dset, return_inverse=True) - - # Extract node parameters - for i, node_type_id in enumerate(node_type_ids): - params_path = PurePath( - self._conf["components"]["point_neuron_models_dir"], - node_types_map[node_type_id]["dynamics_params"], - ) - - with open(params_path) as fp: - params = json.load(fp) - - nest_nodes[inv_ind == i].set(params) - else: - # More than one NEST neuron model in CSV file - - # TODO: Utilizing np.unique(node_type_id_dset, return_=...) - # with the different return options might be more efficient - - nest_nodes = NodeCollection() - for k, g in itertools.groupby(node_type_id_dset): - # k is a node_type_id key - # g is an itertools group object - # len(list(g)) gives the number of consecutive occurrences of the current k - model = node_types_map[k]["model_template"] - n_nrns = len(list(g)) - params_path = PurePath( - self._conf["components"]["point_neuron_models_dir"], - node_types_map[k]["dynamics_params"], - ) - with open(params_path) as fp: - params = json.load(fp) - - nest_nodes += Create(model, n=n_nrns, params=params) - - self._node_collections[pop_name] = nest_nodes - - def _create_spike_train_injectors(self, nodes_conf): - """Create spike train injector nodes. - - Parameters - ---------- - nodes_conf : dict - Config as dictionary specifying filenames - """ - - with h5py.File(nodes_conf["nodes_file"], "r") as nodes_h5f: - for pop_name in nodes_h5f["nodes"]: - node_type_id_dset = nodes_h5f["nodes"][pop_name]["node_type_id"] - n_nodes = node_type_id_dset.size - - input_file = None - for inputs_dict in self._conf["inputs"].values(): - if inputs_dict["node_set"] == pop_name: - input_file = inputs_dict["input_file"] - break # Break once we found the matching population - - if input_file is None: - msg = f"Could not find an input file for population {pop_name} in config file." 
- raise ValueError(msg) - - with h5py.File(input_file, "r") as input_h5f: - # Deduce the HDF5 file structure - all_groups = all([isinstance(g, h5py.Group) for g in input_h5f["spikes"].values()]) - any_groups = any([isinstance(g, h5py.Group) for g in input_h5f["spikes"].values()]) - if (all_groups or any_groups) and not (all_groups and any_groups): - msg = ( - "Unsupported HDF5 structure; groups and " - "datasets cannot be on the same hierarchical " - f"level in input spikes file {input_file}" - ) - raise ValueError(msg) - - if all_groups: - if pop_name in input_h5f["spikes"].keys(): - spikes_grp = input_h5f["spikes"][pop_name] - else: - msg = f"Did not find a matching HDF5 group name for population {pop_name} in {input_file}" - raise ValueError(msg) - else: - spikes_grp = input_h5f["spikes"] - - if "gids" in spikes_grp: - node_ids = spikes_grp["gids"][:] - elif "node_ids" in spikes_grp: - node_ids = spikes_grp["node_ids"][:] - else: - msg = f"No dataset called 'gids' or 'node_ids' in {input_file}" - raise ValueError(msg) - - timestamps = spikes_grp["timestamps"][:] - - # Map node id's to spike times - # TODO: Can this be done in a more efficient way? - spikes_map = {node_id: timestamps[node_ids == node_id] for node_id in range(n_nodes)} - params_lst = [ - {"spike_times": spikes_map[node_id], "allow_offgrid_times": True} for node_id in range(n_nodes) - ] - - # Create and store NC - nest_nodes = Create("spike_train_injector", n=n_nodes, params=params_lst) - self._node_collections[pop_name] = nest_nodes - - def _create_node_type_parameter_map(self, nodes_df, csv_fn): - """Create map between node type id and node properties. - - For neuron models, each node type id in the node types CSV file has: - * A model template which describes the name of the neuron model - * A reference to a JSON file describing the neuron's parametrization - - This function creates a map of the above node properties with the - node type id as key. - - Parameters - ---------- - nodes_df : pandas.DataFrame - Node type CSV table as dataframe. - csv_fn : str - Name of current CSV file. Used for more informative error messages. - - Returns - ------- - dict : - Map of node properties for the different node type ids. - """ - - if "model_template" not in nodes_df.columns: - msg = f"Missing the required 'model_template' header specifying NEST neuron models in {csv_fn}." - raise ValueError(msg) - - if "dynamics_params" not in nodes_df.columns: - msg = ( - "Missing the required 'dynamics_params' header specifying " - f".json files with model parameters in {csv_fn}" - ) - raise ValueError(msg) - - nodes_df["model_template"] = nodes_df["model_template"].str.replace("nest:", "") - - req_cols = ["model_template", "dynamics_params"] - node_types_map = nodes_df.set_index("node_type_id")[req_cols].to_dict(orient="index") - - return node_types_map - - def Connect(self, hdf5_hyperslab_size=None): - """Connect the SONATA network nodes. - - The connections are created by first parsing the edge (synapse) CSV - files to create a map of synaptic properties on the Python level. This - is then sent to the NEST kernel together with the edge HDF5 files to - create the connections. - - For large networks, the edge HDF5 files might not fit into memory in - their entirety. In the NEST kernel, the edge HDF5 datasets are therefore - read sequentially as blocks of contiguous hyperslabs. The hyperslab size - is modifiable so that the user is able to achieve a balance between - the number of read operations and memory overhead. 
- - Parameters - ---------- - hdf5_hyperslab_size : int, optional - Size of the hyperslab to read in one read operation. The hyperslab - size is applied to all HDF5 datasets that need to be read in order - to create the connections. Default: ``2**20``. - """ - - if not self._are_nodes_created: - msg = "The SONATA network nodes must be created before any connections can be made" - raise nestkernel.NESTError(msg) - - if hdf5_hyperslab_size is None: - hdf5_hyperslab_size = self._hyperslab_size_default - - self._verify_hyperslab_size(hdf5_hyperslab_size) - - graph_specs = self._create_graph_specs() - - # Check whether HDF5 files exist and are not blocked. - for d in graph_specs["edges"]: - try: - f = h5py.File(d["edges_file"], "r") - f.close() - except BlockingIOError as err: - raise BlockingIOError(f"{err.strerror} for {os.path.realpath(d['edges_file'])}") from None - - sps(graph_specs) - sps(hdf5_hyperslab_size) - sr("ConnectSonata") - - self._is_network_built = True - - def _verify_hyperslab_size(self, hyperslab_size): - """Check if provided hyperslab size is valid.""" - - if not isinstance(hyperslab_size, int): - raise TypeError("hdf5_hyperslab_size must be passed as int") - if hyperslab_size <= 0: - raise ValueError("hdf5_hyperslab_size must be strictly positive") - - def _create_graph_specs(self): - """Create graph specifications dictionary. - - The graph specifications (`graph_specs`) dictionary is passed to - the kernel where the connections are created. `graph_specs` has the - following structure: - - { - "nodes": - { - "": NodeCollection, - "": NodeCollection, - ... - }, - "edges": - [ - {"edges_file": '', - "syn_specs": {"": syn_spec, - "": syn_spec, - ... - } - }, - {"edges_file": '', - "syn_specs": {"": syn_spec, - "": syn_spec, - ... - } - }, - ... - ] - } - - Returns - ------- - dict : - Map of SONATA graph specifications. - """ - - self._create_edges_maps() - graph_specs = {"nodes": self._node_collections, "edges": self._edges_maps} - return graph_specs - - def _create_edges_maps(self): - """Create a collection of maps of edge properties. - - Creates a map between edge type id and edge (synapse) properties for - each edge CSV file. The associated edge HDF5 filename is included in - the map as well. - """ - - # Iterate edge config files - for edges_conf in self._conf["networks"]["edges"]: - edges_map = {} - edges_csv_fn = edges_conf["edge_types_file"] - edges_df = pd.read_csv(edges_csv_fn, sep=r"\s+") - - if "model_template" not in edges_df.columns: - msg = f"Missing the required 'model_template' header specifying NEST synapse models in {edges_csv_fn}." - raise ValueError(msg) - - # Rename column labels to names used by NEST. Note that rename - # don't throw an error for extra labels (we want this behavior) - edges_df.rename( - columns={"model_template": "synapse_model", "syn_weight": "weight"}, - inplace=True, - ) - - edges_df_cols = set(edges_df.columns) - - # If 'dynamics_params' is specified, additional synapse - # parameters may be given in a .json file - have_dynamics = "dynamics_params" in edges_df.columns - - # Extract synapse models in the edge CSV file and check if - # only one model is present; we can then use a more efficient - # procedure for extracting the syn_specs. 
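The single-model shortcut described in the comment above hinges on a NumPy broadcast comparison. A standalone sketch with made-up synapse model names::

    import numpy as np

    models_arr = np.array(["static_synapse", "static_synapse", "stdp_synapse"])

    # Comparing one element against the whole array broadcasts to a boolean
    # array; .all() is True only if every entry equals the first one.
    is_one_model = (models_arr[0] == models_arr).all()
    print(is_one_model)  # False, because the models are mixed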
- models_arr = edges_df["synapse_model"].to_numpy() - is_one_model = (models_arr[0] == models_arr).all() - - if is_one_model: - # Only one model in the edge CSV file - - synapse_model = models_arr[0] - # Find set of settable parameters for synapse model - settable_params = set([*GetDefaults(synapse_model)]) - # Parameters to extract (elements common to both sets) - extract_cols = list(settable_params & edges_df_cols) - if have_dynamics: - extract_cols.append("dynamics_params") - - # Extract syn_spec for each edge type - syn_specs = edges_df.set_index("edge_type_id")[extract_cols].to_dict(orient="index") - - if have_dynamics: - # Include parameters from JSON file in the syn_spec - for edge_type_id, syn_spec in syn_specs.copy().items(): - params_path = PurePath( - self._conf["components"]["synaptic_models_dir"], - syn_spec["dynamics_params"], - ) - with open(params_path) as fp: - params = json.load(fp) - - syn_specs[edge_type_id].update(params) - syn_specs[edge_type_id].pop("dynamics_params") - else: - # More than one synapse model in CSV file; in this case we - # must iterate each row in the CSV table. For each row, - # we extract the syn_spec associated with the specified model - - syn_specs = {} - idx_map = {k: i for i, k in enumerate(list(edges_df), start=1)} - - for row in edges_df.itertuples(name=None): - # Set of settable parameters - settable_params = set([*GetDefaults(row[idx_map["synapse_model"]])]) - # Parameters to extract (elements common to both sets) - extract_cols = list(settable_params & edges_df_cols) - syn_spec = {k: row[idx_map[k]] for k in extract_cols} - - if have_dynamics: - # Include parameters from JSON file in the map - params_path = PurePath( - self._conf["components"]["synaptic_models_dir"], - row[idx_map["dynamics_params"]], - ) - - with open(params_path) as fp: - params = json.load(fp) - - syn_spec.update(params) - - syn_specs[row[idx_map["edge_type_id"]]] = syn_spec - - # Create edges map - edges_map["syn_specs"] = syn_specs - edges_map["edges_file"] = edges_conf["edges_file"] - self._edges_maps.append(edges_map) - - def BuildNetwork(self, hdf5_hyperslab_size=None): - """Build SONATA network. - - Convenience function for building the SONATA network. The function - first calls the membership function :py:func:`Create()` to create the - network nodes and then the membership function :py:func:`Connect()` - to create their connections. - - For more details, see :py:func:`Create()` and :py:func:`Connect()`. - - Parameters - ---------- - hdf5_hyperslab_size : int, optional - Size of hyperslab that is read into memory in one read operation. - Applies to all HDF5 datasets relevant for creating the connections. - Default: ``2**20``. - - Returns - ------- - node_collections : dict - A dictionary containing the created :py:class:`.NodeCollection` - for each population. The population names are keys. - """ - - if hdf5_hyperslab_size is not None: - # Chunk size is verfified in Connect, but we also verify here - # to save computational resources in case of wrong input - self._verify_hyperslab_size(hdf5_hyperslab_size) - - node_collections = self.Create() - self.Connect(hdf5_hyperslab_size=hdf5_hyperslab_size) - - return node_collections - - def Simulate(self): - """Simulate the SONATA network. - - The simulation time and resolution are expected to be provided in the - JSON configuration file. 
-        """
-
-        # Verify that network is built
-        if not self._is_network_built:
-            msg = "The SONATA network must be built before a simulation can be done"
-            raise nestkernel.NESTError(msg)
-
-        if "tstop" in self._conf["run"]:
-            T_sim = self._conf["run"]["tstop"]
-        elif "duration" in self._conf["run"]:
-            T_sim = self._conf["run"]["duration"]
-        else:
-            msg = "Simulation time 'tstop' or 'duration' must be specified in configuration file"
-            raise ValueError(msg)
-
-        Simulate(T_sim)
-
-    @property
-    def node_collections(self):
-        return self._node_collections
-
-    @property
-    def config(self):
-        return self._conf
diff --git a/pynest/nest/lib/hl_api_spatial.py b/pynest/nest/lib/hl_api_spatial.py
index 4cc224a006..6d474efb7c 100644
--- a/pynest/nest/lib/hl_api_spatial.py
+++ b/pynest/nest/lib/hl_api_spatial.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 #
-# hl_api_spatial.py
+# _hl_api_spatial.py
 #
 # This file is part of NEST.
 #
@@ -23,43 +23,42 @@
 Functions relating to spatial properties of nodes
 """
 
-import os
 
 import numpy as np
 
+from .. import pynestkernel as kernel
 from .. import nestkernel_api as nestkernel
-from .hl_api_connections import GetConnections
-from .hl_api_helper import is_iterable, stringify_path
-from .hl_api_parallel_computing import NumProcesses, Rank
-from .hl_api_types import NodeCollection
+from ._hl_api_helper import is_iterable
+from ._hl_api_connections import GetConnections
+from ._hl_api_parallel_computing import NumProcesses, Rank
+from ._hl_api_types import NodeCollection
 
 try:
     import matplotlib as mpl
-    import matplotlib.patches as mpatches
     import matplotlib.path as mpath
-
+    import matplotlib.patches as mpatches
     HAVE_MPL = True
 except ImportError:
     HAVE_MPL = False
 
 __all__ = [
-    "CreateMask",
-    "Displacement",
-    "Distance",
-    "DumpLayerConnections",
-    "DumpLayerNodes",
-    "FindCenterElement",
-    "FindNearestElement",
-    "GetPosition",
-    "GetTargetNodes",
-    "GetSourceNodes",
-    "GetTargetPositions",
-    "GetSourcePositions",
-    "PlotLayer",
-    "PlotProbabilityParameter",
-    "PlotTargets",
-    "PlotSources",
-    "SelectNodesByMask",
+    'CreateMask',
+    'Displacement',
+    'Distance',
+    'DumpLayerConnections',
+    'DumpLayerNodes',
+    'FindCenterElement',
+    'FindNearestElement',
+    'GetPosition',
+    'GetTargetNodes',
+    'GetSourceNodes',
+    'GetTargetPositions',
+    'GetSourcePositions',
+    'PlotLayer',
+    'PlotProbabilityParameter',
+    'PlotTargets',
+    'PlotSources',
+    'SelectNodesByMask',
 ]
 
 
@@ -140,7 +139,7 @@ def CreateMask(masktype, specs, anchor=None):
         {'lower_left' : [float, float, float],
          'upper_right' : [float, float, float],
          'azimuth_angle: float # default: 0.0,
-         'polar_angle : float # default: 0.0}
+         'polar_angle : float # default: 0.0}
 
     #or
 
     'spherical' :
         {'radius' : float}
 
@@ -163,7 +162,7 @@ def CreateMask(masktype, specs, anchor=None):
 
     By default the top-left corner of a grid mask, i.e., the grid mask
     element with grid index [0, 0], is aligned with the driver node.
It can be changed by means of the 'anchor' parameter: - :: + :: 'anchor' : {'row' : float, @@ -189,9 +188,10 @@ def CreateMask(masktype, specs, anchor=None): nest.Connect(l, l, conndict) """ if anchor is None: - return nestkernel.llapi_create_mask({masktype: specs}) + return sli_func('CreateMask', {masktype: specs}) else: - return nestkernel.llapi_create_mask({masktype: specs, "anchor": anchor}) + return sli_func('CreateMask', + {masktype: specs, 'anchor': anchor}) def GetPosition(nodes): @@ -306,12 +306,13 @@ def Displacement(from_arg, to_arg): raise TypeError("to_arg must be a NodeCollection") if isinstance(from_arg, np.ndarray): - from_arg = (from_arg,) + from_arg = (from_arg, ) - if len(from_arg) > 1 and len(to_arg) > 1 and not len(from_arg) == len(to_arg): + if (len(from_arg) > 1 and len(to_arg) > 1 and not + len(from_arg) == len(to_arg)): raise ValueError("to_arg and from_arg must have same size unless one have size 1.") - return nestkernel.llapi_displacement(from_arg, to_arg) + return sli_func('Displacement', from_arg, to_arg) def Distance(from_arg, to_arg): @@ -373,12 +374,13 @@ def Distance(from_arg, to_arg): raise TypeError("to_arg must be a NodeCollection") if isinstance(from_arg, np.ndarray): - from_arg = (from_arg,) + from_arg = (from_arg, ) - if len(from_arg) > 1 and len(to_arg) > 1 and not len(from_arg) == len(to_arg): + if (len(from_arg) > 1 and len(to_arg) > 1 and not + len(from_arg) == len(to_arg)): raise ValueError("to_arg and from_arg must have same size unless one have size 1.") - return nestkernel.llapi_spatial_distance(from_arg, to_arg) + return sli_func('Distance', from_arg, to_arg) def FindNearestElement(layer, locations, find_all=False): @@ -441,7 +443,7 @@ def FindNearestElement(layer, locations, find_all=False): # Ensure locations is sequence, keeps code below simpler if not is_iterable(locations[0]): - locations = (locations,) + locations = (locations, ) result = [] @@ -477,11 +479,11 @@ def _rank_specific_filename(basename): np = NumProcesses() np_digs = len(str(np - 1)) # for pretty formatting rk = Rank() - dot = basename.find(".") + dot = basename.find('.') if dot < 0: - return "%s-%0*d" % (basename, np_digs, rk) + return '%s-%0*d' % (basename, np_digs, rk) else: - return "%s-%0*d%s" % (basename[:dot], np_digs, rk, basename[dot:]) + return '%s-%0*d%s' % (basename[:dot], np_digs, rk, basename[dot:]) def DumpLayerNodes(layer, outname): @@ -490,8 +492,7 @@ def DumpLayerNodes(layer, outname): Write `node ID` and position data to `outname` file. 
For each node in `layer`, a line with the following information is written: - - :: + :: node ID x-position y-position [z-position] @@ -531,12 +532,13 @@ def DumpLayerNodes(layer, outname): nest.DumpLayerNodes(s_nodes, 'positions.txt') """ - if not isinstance(layer, NodeCollection): raise TypeError("layer must be a NodeCollection") - outname = stringify_path(outname) - nestkernel.llapi_dump_layer_nodes(layer._datum, _rank_specific_filename(outname)) + sli_func(""" + (w) file exch DumpLayerNodes close + """, + layer, _rank_specific_filename(outname)) def DumpLayerConnections(source_layer, target_layer, synapse_model, outname): @@ -596,17 +598,21 @@ def DumpLayerConnections(source_layer, target_layer, synapse_model, outname): # write connectivity information to file nest.DumpLayerConnections(s_nodes, s_nodes, 'static_synapse', 'conns.txt') """ - if not isinstance(source_layer, NodeCollection): raise TypeError("source_layer must be a NodeCollection") - if not isinstance(target_layer, NodeCollection): raise TypeError("target_layer must be a NodeCollection") - outname = stringify_path(outname) - nestkernel.llapi_dump_layer_connections( - source_layer._datum, target_layer._datum, synapse_model, _rank_specific_filename(outname) - ) + sli_func(""" + /oname Set + cvlit /synmod Set + /lyr_target Set + /lyr_source Set + oname (w) file lyr_source lyr_target synmod + DumpLayerConnections close + """, + source_layer, target_layer, synapse_model, + _rank_specific_filename(outname)) def FindCenterElement(layer): @@ -645,9 +651,9 @@ def FindCenterElement(layer): if not isinstance(layer, NodeCollection): raise TypeError("layer must be a NodeCollection") - nearest_to_center = FindNearestElement(layer, layer.spatial["center"])[0] - index = layer.index(nearest_to_center.get("global_id")) - return layer[index : index + 1] + nearest_to_center = FindNearestElement(layer, layer.spatial['center'])[0] + index = layer.index(nearest_to_center.get('global_id')) + return layer[index:index+1] def GetTargetNodes(sources, tgt_layer, syn_model=None): @@ -858,11 +864,12 @@ def GetTargetPositions(sources, tgt_layer, syn_model=None): # Find positions to all nodes in target layer pos_all_tgts = GetPosition(tgt_layer) - first_tgt_node_id = tgt_layer[0].get("global_id") + first_tgt_node_id = tgt_layer[0].get('global_id') - connections = GetConnections(sources, tgt_layer, synapse_model=syn_model) - srcs = connections.get("source") - tgts = connections.get("target") + connections = GetConnections(sources, tgt_layer, + synapse_model=syn_model) + srcs = connections.get('source') + tgts = connections.get('target') if isinstance(srcs, int): srcs = [srcs] if isinstance(tgts, int): @@ -939,11 +946,12 @@ def GetSourcePositions(src_layer, targets, syn_model=None): # Find positions to all nodes in source layer pos_all_srcs = GetPosition(src_layer) - first_src_node_id = src_layer[0].get("global_id") + first_src_node_id = src_layer[0].get('global_id') - connections = GetConnections(src_layer, targets, synapse_model=syn_model) - srcs = connections.get("source") - tgts = connections.get("target") + connections = GetConnections(src_layer, targets, + synapse_model=syn_model) + srcs = connections.get('source') + tgts = connections.get('target') if isinstance(srcs, int): srcs = [srcs] if isinstance(tgts, int): @@ -989,7 +997,11 @@ def SelectNodesByMask(layer, anchor, mask_obj): mask_datum = mask_obj._datum - return nestkernel.llapi_select_nodes_by_mask(layer._datum, anchor, mask_datum) + node_id_list = sli_func('SelectNodesByMask', + layer, anchor, 
mask_datum) + + # When creating a NodeCollection, the input list of nodes IDs must be sorted. + return NodeCollection(sorted(node_id_list)) def _draw_extent(ax, xctr, yctr, xext, yext): @@ -1002,33 +1014,30 @@ def _draw_extent(ax, xctr, yctr, xext, yext): # thin gray line indicating extent llx, lly = xctr - xext / 2.0, yctr - yext / 2.0 urx, ury = llx + xext, lly + yext - ax.add_patch(plt.Rectangle((llx, lly), xext, yext, fc="none", ec="0.5", lw=1, zorder=1)) + ax.add_patch( + plt.Rectangle((llx, lly), xext, yext, fc='none', ec='0.5', lw=1, + zorder=1)) # set limits slightly outside extent - ax.set( - aspect="equal", - xlim=(llx - 0.05 * xext, urx + 0.05 * xext), - ylim=(lly - 0.05 * yext, ury + 0.05 * yext), - xticks=tuple(), - yticks=tuple(), - ) + ax.set(aspect='equal', + xlim=(llx - 0.05 * xext, urx + 0.05 * xext), + ylim=(lly - 0.05 * yext, ury + 0.05 * yext), + xticks=tuple(), yticks=tuple()) def _shifted_positions(pos, ext): """Get shifted positions corresponding to boundary conditions.""" - return [ - [pos[0] + ext[0], pos[1]], - [pos[0] - ext[0], pos[1]], - [pos[0], pos[1] + ext[1]], - [pos[0], pos[1] - ext[1]], - [pos[0] + ext[0], pos[1] - ext[1]], - [pos[0] - ext[0], pos[1] + ext[1]], - [pos[0] + ext[0], pos[1] + ext[1]], - [pos[0] - ext[0], pos[1] - ext[1]], - ] - - -def PlotLayer(layer, fig=None, nodecolor="b", nodesize=20): + return [[pos[0] + ext[0], pos[1]], + [pos[0] - ext[0], pos[1]], + [pos[0], pos[1] + ext[1]], + [pos[0], pos[1] - ext[1]], + [pos[0] + ext[0], pos[1] - ext[1]], + [pos[0] - ext[0], pos[1] + ext[1]], + [pos[0] + ext[0], pos[1] + ext[1]], + [pos[0] - ext[0], pos[1] - ext[1]]] + + +def PlotLayer(layer, fig=None, nodecolor='b', nodesize=20): """ Plot all nodes in a `layer`. @@ -1079,20 +1088,20 @@ def PlotLayer(layer, fig=None, nodecolor="b", nodesize=20): import matplotlib.pyplot as plt if not HAVE_MPL: - raise ImportError("Matplotlib could not be imported") + raise ImportError('Matplotlib could not be imported') if not isinstance(layer, NodeCollection): - raise TypeError("layer must be a NodeCollection.") + raise TypeError('layer must be a NodeCollection.') # get layer extent - ext = layer.spatial["extent"] + ext = layer.spatial['extent'] if len(ext) == 2: # 2D layer # get layer extent and center, x and y xext, yext = ext - xctr, yctr = layer.spatial["center"] + xctr, yctr = layer.spatial['center'] # extract position information, transpose to list of x and y pos if len(layer) == 1: @@ -1111,6 +1120,9 @@ def PlotLayer(layer, fig=None, nodecolor="b", nodesize=20): _draw_extent(ax, xctr, yctr, xext, yext) elif len(ext) == 3: + # 3D layer + from mpl_toolkits.mplot3d import Axes3D + # extract position information, transpose to list of x,y,z pos if len(layer) == 1: # handle case of single node @@ -1120,7 +1132,7 @@ def PlotLayer(layer, fig=None, nodecolor="b", nodesize=20): if fig is None: fig = plt.figure() - ax = fig.add_subplot(111, projection="3d") + ax = fig.add_subplot(111, projection='3d') else: ax = fig.gca() @@ -1133,20 +1145,10 @@ def PlotLayer(layer, fig=None, nodecolor="b", nodesize=20): return fig -def PlotTargets( - src_nrn, - tgt_layer, - syn_type=None, - fig=None, - mask=None, - probability_parameter=None, - src_color="red", - src_size=50, - tgt_color="blue", - tgt_size=20, - mask_color="yellow", - probability_cmap="Greens", -): +def PlotTargets(src_nrn, tgt_layer, syn_type=None, fig=None, + mask=None, probability_parameter=None, + src_color='red', src_size=50, tgt_color='blue', tgt_size=20, + mask_color='yellow', probability_cmap='Greens'): """ 
Plot all targets of source neuron `src_nrn` in a target layer `tgt_layer`. @@ -1233,14 +1235,14 @@ def PlotTargets( srcpos = GetPosition(src_nrn) # get layer extent - ext = tgt_layer.spatial["extent"] + ext = tgt_layer.spatial['extent'] if len(ext) == 2: # 2D layer # get layer extent and center, x and y xext, yext = ext - xctr, yctr = tgt_layer.spatial["center"] + xctr, yctr = tgt_layer.spatial['center'] if fig is None: fig = plt.figure() @@ -1258,22 +1260,18 @@ def PlotTargets( if mask is not None or probability_parameter is not None: edges = [xctr - xext, xctr + xext, yctr - yext, yctr + yext] - PlotProbabilityParameter( - src_nrn, - probability_parameter, - mask=mask, - edges=edges, - ax=ax, - prob_cmap=probability_cmap, - mask_color=mask_color, - ) + PlotProbabilityParameter(src_nrn, probability_parameter, mask=mask, edges=edges, ax=ax, + prob_cmap=probability_cmap, mask_color=mask_color) _draw_extent(ax, xctr, yctr, xext, yext) else: + # 3D layer + from mpl_toolkits.mplot3d import Axes3D + if fig is None: fig = plt.figure() - ax = fig.add_subplot(111, projection="3d") + ax = fig.add_subplot(111, projection='3d') else: ax = fig.gca() @@ -1290,20 +1288,10 @@ def PlotTargets( return fig -def PlotSources( - src_layer, - tgt_nrn, - syn_type=None, - fig=None, - mask=None, - probability_parameter=None, - tgt_color="red", - tgt_size=50, - src_color="blue", - src_size=20, - mask_color="yellow", - probability_cmap="Greens", -): +def PlotSources(src_layer, tgt_nrn, syn_type=None, fig=None, + mask=None, probability_parameter=None, + tgt_color='red', tgt_size=50, src_color='blue', src_size=20, + mask_color='yellow', probability_cmap='Greens'): """ Plot all sources of target neuron `tgt_nrn` in a source layer `src_layer`. @@ -1388,14 +1376,14 @@ def PlotSources( tgtpos = GetPosition(tgt_nrn) # get layer extent - ext = src_layer.spatial["extent"] + ext = src_layer.spatial['extent'] if len(ext) == 2: # 2D layer # get layer extent and center, x and y xext, yext = ext - xctr, yctr = src_layer.spatial["center"] + xctr, yctr = src_layer.spatial['center'] if fig is None: fig = plt.figure() @@ -1413,15 +1401,8 @@ def PlotSources( if mask is not None or probability_parameter is not None: edges = [xctr - xext, xctr + xext, yctr - yext, yctr + yext] - PlotProbabilityParameter( - tgt_nrn, - probability_parameter, - mask=mask, - edges=edges, - ax=ax, - prob_cmap=probability_cmap, - mask_color=mask_color, - ) + PlotProbabilityParameter(tgt_nrn, probability_parameter, mask=mask, edges=edges, ax=ax, + prob_cmap=probability_cmap, mask_color=mask_color) _draw_extent(ax, xctr, yctr, xext, yext) @@ -1431,7 +1412,7 @@ def PlotSources( if fig is None: fig = plt.figure() - ax = fig.add_subplot(111, projection="3d") + ax = fig.add_subplot(111, projection='3d') else: ax = fig.gca() @@ -1448,35 +1429,37 @@ def PlotSources( return fig -def _create_mask_patches(mask, periodic, extent, source_pos, face_color="yellow"): +def _create_mask_patches(mask, periodic, extent, source_pos, face_color='yellow'): """Create Matplotlib Patch objects representing the mask""" # import pyplot here and not at toplevel to avoid preventing users # from changing matplotlib backend after importing nest - import matplotlib as mtpl import matplotlib.pyplot as plt + import matplotlib as mtpl - edge_color = "black" + edge_color = 'black' alpha = 0.2 line_width = 2 mask_patches = [] - if "anchor" in mask: - offs = np.array(mask["anchor"]) + if 'anchor' in mask: + offs = np.array(mask['anchor']) else: - offs = np.array([0.0, 0.0]) + offs = np.array([0., 
0.]) - if "circular" in mask: - r = mask["circular"]["radius"] + if 'circular' in mask: + r = mask['circular']['radius'] - patch = plt.Circle(source_pos + offs, radius=r, fc=face_color, ec=edge_color, alpha=alpha, lw=line_width) + patch = plt.Circle(source_pos + offs, radius=r, + fc=face_color, ec=edge_color, alpha=alpha, lw=line_width) mask_patches.append(patch) if periodic: for pos in _shifted_positions(source_pos + offs, extent): - patch = plt.Circle(pos, radius=r, fc=face_color, ec=edge_color, alpha=alpha, lw=line_width) + patch = plt.Circle(pos, radius=r, + fc=face_color, ec=edge_color, alpha=alpha, lw=line_width) mask_patches.append(patch) - elif "doughnut" in mask: + elif 'doughnut' in mask: # Mmm... doughnut def make_doughnut_patch(pos, r_out, r_in, ec, fc, alpha): def make_circle(r): @@ -1485,7 +1468,6 @@ def make_circle(r): x = r * np.cos(t) y = r * np.sin(t) return np.hstack((x, y)) - outside_verts = make_circle(r_out)[::-1] inside_verts = make_circle(r_in) codes = np.ones(len(inside_verts), dtype=mpath.Path.code_type) * mpath.Path.LINETO @@ -1496,8 +1478,8 @@ def make_circle(r): path = mpath.Path(vertices, all_codes) return mpatches.PathPatch(path, fc=fc, ec=ec, alpha=alpha, lw=line_width) - r_in = mask["doughnut"]["inner_radius"] - r_out = mask["doughnut"]["outer_radius"] + r_in = mask['doughnut']['inner_radius'] + r_out = mask['doughnut']['outer_radius'] pos = source_pos + offs patch = make_doughnut_patch(pos, r_in, r_out, edge_color, face_color, alpha) mask_patches.append(patch) @@ -1505,20 +1487,21 @@ def make_circle(r): for pos in _shifted_positions(source_pos + offs, extent): patch = make_doughnut_patch(pos, r_in, r_out, edge_color, face_color, alpha) mask_patches.append(patch) - elif "rectangular" in mask: - ll = np.array(mask["rectangular"]["lower_left"]) - ur = np.array(mask["rectangular"]["upper_right"]) + elif 'rectangular' in mask: + ll = np.array(mask['rectangular']['lower_left']) + ur = np.array(mask['rectangular']['upper_right']) width = ur[0] - ll[0] height = ur[1] - ll[1] pos = source_pos + ll + offs - cntr = [pos[0] + width / 2, pos[1] + height / 2] + cntr = [pos[0] + width/2, pos[1] + height/2] - if "azimuth_angle" in mask["rectangular"]: - angle = mask["rectangular"]["azimuth_angle"] + if 'azimuth_angle' in mask['rectangular']: + angle = mask['rectangular']['azimuth_angle'] else: angle = 0.0 - patch = plt.Rectangle(pos, width, height, fc=face_color, ec=edge_color, alpha=alpha, lw=line_width) + patch = plt.Rectangle(pos, width, height, + fc=face_color, ec=edge_color, alpha=alpha, lw=line_width) # Need to rotate about center trnsf = mtpl.transforms.Affine2D().rotate_deg_around(cntr[0], cntr[1], angle) + plt.gca().transData patch.set_transform(trnsf) @@ -1526,57 +1509,42 @@ def make_circle(r): if periodic: for pos in _shifted_positions(source_pos + ll + offs, extent): - patch = plt.Rectangle(pos, width, height, fc=face_color, ec=edge_color, alpha=alpha, lw=line_width) + patch = plt.Rectangle(pos, width, height, + fc=face_color, ec=edge_color, alpha=alpha, lw=line_width) - cntr = [pos[0] + width / 2, pos[1] + height / 2] + cntr = [pos[0] + width/2, pos[1] + height/2] # Need to rotate about center trnsf = mtpl.transforms.Affine2D().rotate_deg_around(cntr[0], cntr[1], angle) + plt.gca().transData patch.set_transform(trnsf) mask_patches.append(patch) - elif "elliptical" in mask: - width = mask["elliptical"]["major_axis"] - height = mask["elliptical"]["minor_axis"] - if "azimuth_angle" in mask["elliptical"]: - angle = mask["elliptical"]["azimuth_angle"] + elif 
'elliptical' in mask: + width = mask['elliptical']['major_axis'] + height = mask['elliptical']['minor_axis'] + if 'azimuth_angle' in mask['elliptical']: + angle = mask['elliptical']['azimuth_angle'] else: angle = 0.0 - if "anchor" in mask["elliptical"]: - anchor = mask["elliptical"]["anchor"] + if 'anchor' in mask['elliptical']: + anchor = mask['elliptical']['anchor'] else: - anchor = np.array([0.0, 0.0]) - patch = mpl.patches.Ellipse( - source_pos + offs + anchor, - width, - height, - angle=angle, - fc=face_color, - ec=edge_color, - alpha=alpha, - lw=line_width, - ) + anchor = np.array([0., 0.]) + patch = mpl.patches.Ellipse(source_pos + offs + anchor, width, height, + angle=angle, fc=face_color, + ec=edge_color, alpha=alpha, lw=line_width) mask_patches.append(patch) if periodic: for pos in _shifted_positions(source_pos + offs + anchor, extent): - patch = mpl.patches.Ellipse( - pos, width, height, angle=angle, fc=face_color, ec=edge_color, alpha=alpha, lw=line_width - ) + patch = mpl.patches.Ellipse(pos, width, height, angle=angle, fc=face_color, + ec=edge_color, alpha=alpha, lw=line_width) mask_patches.append(patch) else: - raise ValueError("Mask type cannot be plotted with this version of PyNEST.") + raise ValueError('Mask type cannot be plotted with this version of PyNEST.') return mask_patches -def PlotProbabilityParameter( - source, - parameter=None, - mask=None, - edges=[-0.5, 0.5, -0.5, 0.5], - shape=[100, 100], - ax=None, - prob_cmap="Greens", - mask_color="yellow", -): +def PlotProbabilityParameter(source, parameter=None, mask=None, edges=[-0.5, 0.5, -0.5, 0.5], shape=[100, 100], + ax=None, prob_cmap='Greens', mask_color='yellow'): """ Create a plot of the connection probability and/or mask. @@ -1610,10 +1578,10 @@ def PlotProbabilityParameter( import matplotlib.pyplot as plt if not HAVE_MPL: - raise ImportError("Matplotlib could not be imported") + raise ImportError('Matplotlib could not be imported') if parameter is None and mask is None: - raise ValueError("At least one of parameter or mask must be specified") + raise ValueError('At least one of parameter or mask must be specified') if ax is None: fig, ax = plt.subplots() ax.set_xlim(*edges[:2]) @@ -1625,14 +1593,13 @@ def PlotProbabilityParameter( positions = [[x, y] for y in np.linspace(edges[2], edges[3], shape[1])] values = parameter.apply(source, positions) z[:, i] = np.array(values) - img = ax.imshow( - np.minimum(np.maximum(z, 0.0), 1.0), extent=edges, origin="lower", cmap=prob_cmap, vmin=0.0, vmax=1.0 - ) + img = ax.imshow(np.minimum(np.maximum(z, 0.0), 1.0), extent=edges, + origin='lower', cmap=prob_cmap, vmin=0., vmax=1.) plt.colorbar(img, ax=ax, fraction=0.046, pad=0.04) if mask is not None: - periodic = source.spatial["edge_wrap"] - extent = source.spatial["extent"] + periodic = source.spatial['edge_wrap'] + extent = source.spatial['extent'] source_pos = GetPosition(source) patches = _create_mask_patches(mask, periodic, extent, source_pos, face_color=mask_color) for patch in patches: diff --git a/pynest/nest/lib/hl_api_types.py b/pynest/nest/lib/hl_api_types.py index d3d93438d4..c4c2cab999 100644 --- a/pynest/nest/lib/hl_api_types.py +++ b/pynest/nest/lib/hl_api_types.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# hl_api_types.py +# _hl_api_types.py # # This file is part of NEST. # @@ -23,45 +23,42 @@ Classes defining the different PyNEST types """ -import json -import numbers -from math import floor, log - -import numpy - +from .._ll_api import * +from .. import pynestkernel as kernel from .. 
import nestkernel_api as nestkernel -from ..ll_api import * -from .hl_api_helper import ( +from ._hl_api_helper import ( get_parameters, get_parameters_hierarchical_addressing, is_iterable, restructure_data, ) -from .hl_api_simulation import GetKernelStatus +from ._hl_api_simulation import GetKernelStatus def sli_func(*args, **kwargs): - raise RuntimeError(f"Called sli_func with\nargs: {args}\nkwargs: {kwargs}") + raise RuntimeError(f'Called sli_func with\nargs: {args}\nkwargs: {kwargs}') +import numpy +import json +from math import floor, log try: import pandas - HAVE_PANDAS = True except ImportError: HAVE_PANDAS = False __all__ = [ - "CollocatedSynapses", - "Compartments", - "CreateParameter", - "Mask", - "NodeCollection", - "Parameter", - "Receptors", - "serialize_data", - "SynapseCollection", - "to_json", + 'CollocatedSynapses', + 'Compartments', + 'CreateParameter', + 'Mask', + 'NodeCollection', + 'Parameter', + 'Receptors', + 'serializable', + 'SynapseCollection', + 'to_json', ] @@ -87,8 +84,7 @@ def CreateParameter(parametertype, specs): Notes ----- - - Instead of using `CreateParameter` you can also use the various parametrizations embedded in NEST. See for + - Instead of using `CreateParameter` you can also use the various parametrizations embedded in NEST. See for instance :py:func:`.uniform`. **Parameter types** @@ -97,14 +93,11 @@ def CreateParameter(parametertype, specs): acceptable keys for their corresponding specification dictionaries: * Constant - :: 'constant' : {'value' : float} # constant value - * Randomization - :: # random parameter with uniform distribution in [min,max) @@ -121,7 +114,6 @@ def CreateParameter(parametertype, specs): 'lognormal' : {'mean' : float, # mean value of logarithm, default: 0.0 'std' : float} # standard deviation of log, default: 1.0 - """ return nestkernel.llapi_create_parameter({parametertype: specs}) @@ -220,16 +212,10 @@ def __iter__(self): def __add__(self, other): if not isinstance(other, NodeCollection): - if isinstance(other, numbers.Number) and other == 0: - other = NodeCollection() - else: - raise TypeError(f"Cannot add object of type '{type(other).__name__}' to 'NodeCollection'") + raise NotImplementedError() return nestkernel.llapi_join_nc(self._datum, other._datum) - def __radd__(self, other): - return self + other - def __getitem__(self, key): if isinstance(key, slice): if key.start is None: @@ -237,60 +223,60 @@ def __getitem__(self, key): else: start = key.start + 1 if key.start >= 0 else key.start if abs(start) > self.__len__(): - raise IndexError("slice start value outside of the NodeCollection") + raise IndexError('slice start value outside of the NodeCollection') if key.stop is None: stop = self.__len__() else: stop = key.stop if key.stop > 0 else key.stop - 1 if abs(stop) > self.__len__(): - raise IndexError("slice stop value outside of the NodeCollection") + raise IndexError('slice stop value outside of the NodeCollection') step = 1 if key.step is None else key.step if step < 1: - raise IndexError("slicing step for NodeCollection must be strictly positive") + raise IndexError('slicing step for NodeCollection must be strictly positive') return nestkernel.llapi_slice(self._datum, start, stop, step) elif isinstance(key, (int, numpy.integer)): if abs(key + (key >= 0)) > self.__len__(): - raise IndexError("index value outside of the NodeCollection") - return self[key : key + 1 : 1] + raise IndexError('index value outside of the NodeCollection') + return self[key:key + 1:1] elif isinstance(key, (list, tuple)): if len(key) == 
0: return NodeCollection([]) # Must check if elements are bool first, because bool inherits from int if all(isinstance(x, bool) for x in key): if len(key) != len(self): - raise IndexError("Bool index array must be the same length as NodeCollection") + raise IndexError('Bool index array must be the same length as NodeCollection') np_key = numpy.array(key, dtype=bool) # Checking that elements are not instances of bool too, because bool inherits from int - elif all(isinstance(x, (int, numpy.integer)) and not isinstance(x, bool) for x in key): + elif all(isinstance(x, int) and not isinstance(x, bool) for x in key): np_key = numpy.array(key, dtype=numpy.uint64) if len(numpy.unique(np_key)) != len(np_key): - raise ValueError("All node IDs in a NodeCollection have to be unique") + raise ValueError('All node IDs in a NodeCollection have to be unique') else: - raise TypeError("Indices must be integers or bools") + raise TypeError('Indices must be integers or bools') return nestkernel.llapi_take_array_index(self._datum, np_key) elif isinstance(key, numpy.ndarray): if len(key) == 0: return NodeCollection([]) if len(key.shape) != 1: - raise TypeError("NumPy indices must one-dimensional") + raise TypeError('NumPy indices must one-dimensional') is_booltype = numpy.issubdtype(key.dtype, numpy.dtype(bool).type) if not (is_booltype or numpy.issubdtype(key.dtype, numpy.integer)): - raise TypeError("NumPy indices must be an array of integers or bools") + raise TypeError('NumPy indices must be an array of integers or bools') if is_booltype and len(key) != len(self): - raise IndexError("Bool index array must be the same length as NodeCollection") + raise IndexError('Bool index array must be the same length as NodeCollection') if not is_booltype and len(numpy.unique(key)) != len(key): - raise ValueError("All node IDs in a NodeCollection have to be unique") + raise ValueError('All node IDs in a NodeCollection have to be unique') return nestkernel.llapi_take_array_index(self._datum, key) else: - raise IndexError("only integers, slices, lists, tuples, and numpy arrays are valid indices") + raise IndexError('only integers, slices, lists, tuples, and numpy arrays are valid indices') def __contains__(self, node_id): return nestkernel.llapi_nc_contains(self._datum, node_id) def __eq__(self, other): if not isinstance(other, NodeCollection): - raise NotImplementedError("Cannot compare NodeCollection to {}".format(type(other).__name__)) + raise NotImplementedError('Cannot compare NodeCollection to {}'.format(type(other).__name__)) if self.__len__() != other.__len__(): return False @@ -327,7 +313,7 @@ def get(self, *params, **kwargs): This is for hierarchical addressing. output : str, ['pandas','json'], optional If the returned data should be in a Pandas DataFrame or in a - JSON string format. + JSON serializable format. 
Returns ------- @@ -380,19 +366,21 @@ def get(self, *params, **kwargs): """ if not self: - raise ValueError("Cannot get parameter of empty NodeCollection") + raise ValueError('Cannot get parameter of empty NodeCollection') # ------------------------- # # Checks of input # # ------------------------- # if not kwargs: - output = "" - elif "output" in kwargs: - output = kwargs["output"] - if output == "pandas" and not HAVE_PANDAS: - raise ImportError("Pandas could not be imported") + output = '' + elif 'output' in kwargs: + output = kwargs['output'] + if output == 'pandas' and not HAVE_PANDAS: + raise ImportError('Pandas could not be imported') else: - raise TypeError("Got unexpected keyword argument") + raise TypeError('Got unexpected keyword argument') + + pandas_output = output == 'pandas' if len(params) == 0: # get() is called without arguments @@ -400,25 +388,23 @@ def get(self, *params, **kwargs): elif len(params) == 1: # params is a tuple with a string or list of strings result = get_parameters(self, params[0]) - if params[0] == "compartments": + if params[0] == 'compartments': result = Compartments(self, result) - elif params[0] == "receptors": + elif params[0] == 'receptors': result = Receptors(self, result) else: # Hierarchical addressing # TODO-PYNEST-NG: Drop this? Not sure anyone ever used it... result = get_parameters_hierarchical_addressing(self, params) - # TODO-PYNEST-NG: Decide if the behavior should be the same - # for single-node node collections or different. if isinstance(result, dict) and len(self) == 1: new_result = {} for k, v in result.items(): - new_result[k] = v[0] if is_iterable(v) and len(v) == 1 and type(v) is not dict else v + new_result[k] = v[0] if is_iterable(v) and len(v) == 1 else v result = new_result - if output == "pandas": - index = self.get("global_id") + if pandas_output: + index = self.get('global_id') if len(params) == 1 and isinstance(params[0], str): # params is a string result = {params[0]: result} @@ -429,7 +415,7 @@ def get(self, *params, **kwargs): index = [index] result = {key: [val] for key, val in result.items()} result = pandas.DataFrame(result, index=index) - elif output == "json": + elif output == 'json': result = to_json(result) return result @@ -470,26 +456,25 @@ def set(self, params=None, **kwargs): local_nodes = [self.local] if len(self) == 1 else self.local - if isinstance(params, dict) and "compartments" in params: - if isinstance(params["compartments"], Compartments): - params["compartments"] = params["compartments"].get_tuple() - elif params["compartments"] is None: + if isinstance(params, dict) and 'compartments' in params: + if isinstance(params['compartments'], Compartments): + params['compartments'] = params['compartments'].get_tuple() + elif params['compartments'] is None: # Adding compartments has been handled by the += operator, so we can remove the entry. - params.pop("compartments") + params.pop('compartments') - if isinstance(params, dict) and "receptors" in params: - if isinstance(params["receptors"], Receptors): - params["receptors"] = params["receptors"].get_tuple() - elif params["receptors"] is None: + if isinstance(params, dict) and 'receptors' in params: + if isinstance(params['receptors'], Receptors): + params['receptors'] = params['receptors'].get_tuple() + elif params['receptors'] is None: # Adding receptors has been handled by the += operator, so we can remove the entry. 
- params.pop("receptors") + params.pop('receptors') if isinstance(params, dict) and all(local_nodes): + node_params = self[0].get() - contains_list = [ - is_iterable(vals) and key in node_params and not is_iterable(node_params[key]) - for key, vals in params.items() - ] + iterable_node_param = lambda key: key in node_params and not is_iterable(node_params[key]) + contains_list = [is_iterable(vals) and iterable_node_param(key) for key, vals in params.items()] if any(contains_list): temp_param = [{} for _ in range(self.__len__())] @@ -515,7 +500,8 @@ def tolist(self): if self.__len__() == 0: return [] - return list(self.get("global_id")) if len(self) > 1 else [self.get("global_id")] + return (list(self.get('global_id')) if len(self) > 1 + else [self.get('global_id')]) def index(self, node_id): """ @@ -534,7 +520,7 @@ def index(self, node_id): index = nestkernel.llapi_nc_find(self._datum, node_id) if index == -1: - raise ValueError("{} is not in NodeCollection".format(node_id)) + raise ValueError('{} is not in NodeCollection'.format(node_id)) return index @@ -548,9 +534,9 @@ def __array__(self, dtype=None): def __getattr__(self, attr): if not self: - raise AttributeError("Cannot get attribute of empty NodeCollection") + raise AttributeError('Cannot get attribute of empty NodeCollection') - if attr == "spatial": + if attr == 'spatial': metadata = nestkernel.llapi_get_nc_metadata(self._datum) val = metadata if metadata else None super().__setattr__(attr, val) @@ -560,7 +546,7 @@ def __getattr__(self, attr): # raises AttributeError to tell NumPy that interfaces other than # __array__ are not available (otherwise get_parameters would be # queried, KeyError would be raised, and all would crash) - if attr.startswith("__array_"): + if attr.startswith('__array_'): raise AttributeError return self.get(attr) @@ -568,7 +554,7 @@ def __getattr__(self, attr): def __setattr__(self, attr, value): # `_datum` is the only property of NodeCollection that should not be # interpreted as a property of the model - if attr == "_datum": + if attr == '_datum': super().__setattr__(attr, value) else: self.set({attr: value}) @@ -604,16 +590,17 @@ class SynapseCollection: _datum = None def __init__(self, data): + if isinstance(data, list): for datum in data: - if not isinstance(datum, nestkernel.ConnectionObject): + if (not isinstance(datum, nestkernel.ConnectionObject)): raise TypeError("Expected ConnectionObject.") self._datum = data elif data is None: # We can have an empty SynapseCollection if there are no connections. self._datum = data else: - if not isinstance(data, nestkernel.ConnectionObject): + if (not isinstance(data, nestkernel.ConnectionObject)): raise TypeError("Expected ConnectionObject.") # self._datum needs to be a list of ConnectionObjects. 
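A sketch of the list-broadcast branch above, assuming a fresh kernel so global IDs start at 1; a list value for a scalar node property is split across the collection, one entry per node::

    import nest

    nrns = nest.Create('iaf_psc_alpha', 3)

    nrns.set(V_m=-70.0)                        # same value for all nodes
    nrns.set({'V_m': [-70.0, -65.0, -60.0]})   # one value per node
    nrns.V_th = -55.0                          # attribute access routes through set()

    assert nrns.tolist() == [1, 2, 3]
    assert nrns.index(2) == 1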
self._datum = [data] @@ -634,8 +621,10 @@ def __eq__(self, other): if self.__len__() != other.__len__(): return False - self_get = self.get(["source", "target", "target_thread", "synapse_id", "port"]) - other_get = other.get(["source", "target", "target_thread", "synapse_id", "port"]) + self_get = self.get(['source', 'target', 'target_thread', + 'synapse_id', 'port']) + other_get = other.get(['source', 'target', 'target_thread', + 'synapse_id', 'port']) if self_get != other_get: return False return True @@ -676,23 +665,23 @@ def __str__(self): def format_row_(s, t, sm, w, dly): try: - return f"{s:>{src_len-1}d} {t:>{trg_len}d} {sm:>{sm_len}s} {w:>#{w_len}.{4}g} {dly:>#{d_len}.{4}g}" + return f'{s:>{src_len-1}d} {t:>{trg_len}d} {sm:>{sm_len}s} {w:>#{w_len}.{4}g} {dly:>#{d_len}.{4}g}' except ValueError: # Used when we have many connections and print_full=False - return f"{s:>{src_len-1}} {t:>{trg_len}} {sm:>{sm_len}} {w:>{w_len}} {dly:>{d_len}}" + return f'{s:>{src_len-1}} {t:>{trg_len}} {sm:>{sm_len}} {w:>{w_len}} {dly:>{d_len}}' MAX_SIZE_FULL_PRINT = 35 # 35 is arbitrarily chosen. params = self.get() if len(params) == 0: - return "The synapse collection does not contain any connections." + return 'The synapse collection does not contain any connections.' - srcs = params["source"] - trgt = params["target"] - wght = params["weight"] - dlay = params["delay"] - s_model = params["synapse_model"] + srcs = params['source'] + trgt = params['target'] + wght = params['weight'] + dlay = params['delay'] + s_model = params['synapse_model'] if isinstance(srcs, int): srcs = [srcs] @@ -701,11 +690,11 @@ def format_row_(s, t, sm, w, dly): dlay = [dlay] s_model = [s_model] - src_h = "source" - trg_h = "target" - sm_h = "synapse model" - w_h = "weight" - d_h = "delay" + src_h = 'source' + trg_h = 'target' + sm_h = 'synapse model' + w_h = 'weight' + d_h = 'delay' # Find maximum number of characters for each column, used to determine width of column src_len = max(len(src_h) + 2, floor(log(max(srcs), 10))) @@ -717,23 +706,21 @@ def format_row_(s, t, sm, w, dly): # 35 is arbitrarily chosen. 
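The table printer above truncates long collections unless ``print_full`` is enabled; a small sketch (connection counts illustrative)::

    import nest

    nrns = nest.Create('iaf_psc_alpha', 10)
    nest.Connect(nrns, nrns)        # all-to-all: 100 connections

    conns = nest.GetConnections()
    print(conns)                    # 15 head rows, vertical ellipsis, 15 tail rows
    conns.print_full = True
    print(conns)                    # the full table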
if len(srcs) >= MAX_SIZE_FULL_PRINT and not self.print_full: # u'\u22EE ' is the unicode for vertical ellipsis, used when we have many connections - srcs = srcs[:15] + ["\u22EE "] + srcs[-15:] - trgt = trgt[:15] + ["\u22EE "] + trgt[-15:] - wght = wght[:15] + ["\u22EE "] + wght[-15:] - dlay = dlay[:15] + ["\u22EE "] + dlay[-15:] - s_model = s_model[:15] + ["\u22EE "] + s_model[-15:] - - headers = f"{src_h:^{src_len}} {trg_h:^{trg_len}} {sm_h:^{sm_len}} {w_h:^{w_len}} {d_h:^{d_len}}" + "\n" - borders = ( - "-" * src_len + " " + "-" * trg_len + " " + "-" * sm_len + " " + "-" * w_len + " " + "-" * d_len + "\n" - ) - output = "\n".join(format_row_(s, t, sm, w, d) for s, t, sm, w, d in zip(srcs, trgt, s_model, wght, dlay)) + srcs = srcs[:15] + [u'\u22EE '] + srcs[-15:] + trgt = trgt[:15] + [u'\u22EE '] + trgt[-15:] + wght = wght[:15] + [u'\u22EE '] + wght[-15:] + dlay = dlay[:15] + [u'\u22EE '] + dlay[-15:] + s_model = s_model[:15] + [u'\u22EE '] + s_model[-15:] + + headers = f'{src_h:^{src_len}} {trg_h:^{trg_len}} {sm_h:^{sm_len}} {w_h:^{w_len}} {d_h:^{d_len}}' + '\n' + borders = '-'*src_len + ' ' + '-'*trg_len + ' ' + '-'*sm_len + ' ' + '-'*w_len + ' ' + '-'*d_len + '\n' + output = '\n'.join(format_row_(s, t, sm, w, d) for s, t, sm, w, d in zip(srcs, trgt, s_model, wght, dlay)) result = headers + borders + output return result def __getattr__(self, attr): - if attr == "distance": + if attr == 'distance': dist = nestkernel.llapi_distance(self._datum) super().__setattr__(attr, dist) return self.distance @@ -743,26 +730,26 @@ def __getattr__(self, attr): def __setattr__(self, attr, value): # `_datum` is the only property of SynapseCollection that should not be # interpreted as a property of the model - if attr == "_datum" or attr == "print_full": + if attr == '_datum' or attr == 'print_full': super().__setattr__(attr, value) else: self.set({attr: value}) def sources(self): """Returns iterator containing the source node IDs of the `SynapseCollection`.""" - sources = self.get("source") + sources = self.get('source') if not isinstance(sources, (list, tuple)): sources = (sources,) return iter(sources) def targets(self): """Returns iterator containing the target node IDs of the `SynapseCollection`.""" - targets = self.get("target") + targets = self.get('target') if not isinstance(targets, (list, tuple)): targets = (targets,) return iter(targets) - def get(self, keys=None, output=""): + def get(self, keys=None, output=''): """ Return a parameter dictionary of the connections. @@ -779,7 +766,7 @@ def get(self, keys=None, output=""): belonging to the given `keys`. output : str, ['pandas','json'], optional If the returned data should be in a Pandas DataFrame or in a - JSON string format. + JSON serializable format. Returns ------- @@ -819,12 +806,12 @@ def get(self, keys=None, output=""): {'source': [1, 1, 1, 2, 2, 2, 3, 3, 3], 'weight': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]} """ - pandas_output = output == "pandas" + pandas_output = output == 'pandas' if pandas_output and not HAVE_PANDAS: - raise ImportError("Pandas could not be imported") + raise ImportError('Pandas could not be imported') # Return empty dictionary if we have no connections or if we have done a nest.ResetKernel() - num_conns = GetKernelStatus("num_connections") # Has to be called first because it involves MPI communication. + num_conns = GetKernelStatus('num_connections') # Has to be called first because it involves MPI communication. 
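The iterators above normalize scalars to one-element tuples, so they behave the same for a single connection; a sketch::

    import nest

    nrns = nest.Create('iaf_psc_alpha', 2)
    nest.Connect(nrns, nrns)
    conns = nest.GetConnections()

    src = list(conns.sources())     # plain node IDs, one per connection
    trg = list(conns.targets())
    w = conns.get('weight')         # scalar for one connection, list otherwise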
if self.__len__() == 0 or num_conns == 0: # Return empty tuple if get is called with an argument return {} if keys is None else () @@ -843,11 +830,12 @@ def get(self, keys=None, output=""): final_result = restructure_data(result, keys) if pandas_output: - index = self.get("source") if self.__len__() > 1 else (self.get("source"),) + index = (self.get('source') if self.__len__() > 1 else + (self.get('source'),)) if isinstance(keys, str): final_result = {keys: final_result} final_result = pandas.DataFrame(final_result, index=index) - elif output == "json": + elif output == 'json': final_result = to_json(final_result) return final_result @@ -882,11 +870,12 @@ def set(self, params=None, **kwargs): # This was added to ensure that the function is a nop (instead of, # for instance, raising an exception) when applied to an empty # SynapseCollection, or after having done a nest.ResetKernel(). - if self.__len__() == 0 or GetKernelStatus("network_size") == 0: + if self.__len__() == 0 or GetKernelStatus('network_size') == 0: return - if isinstance(params, (list, tuple)) and self.__len__() != len(params): - raise TypeError(f"status dict must be a dict, or a list of dicts of length {self.__len__()}") + if (isinstance(params, (list, tuple)) and + self.__len__() != len(params)): + raise TypeError("status dict must be a dict, or a list of dicts of length {}".format(self.__len__())) if kwargs and params is None: params = kwargs @@ -895,10 +884,8 @@ def set(self, params=None, **kwargs): if isinstance(params, dict): node_params = self[0].get() - contains_list = [ - is_iterable(vals) and key in node_params and not is_iterable(node_params[key]) - for key, vals in params.items() - ] + contains_list = [is_iterable(vals) and key in node_params and not is_iterable(node_params[key]) for + key, vals in params.items()] if any(contains_list): temp_param = [{} for _ in range(self.__len__())] @@ -918,7 +905,8 @@ def disconnect(self): """ Disconnect the connections in the `SynapseCollection`. 
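A sketch of the broadcast logic above: a scalar is applied to every connection, while a list whose key names a scalar connection property is distributed element by element::

    import nest

    nrns = nest.Create('iaf_psc_alpha', 2)
    nest.Connect(nrns, nrns)
    conns = nest.GetConnections()

    conns.set(delay=1.5)                                         # everywhere
    conns.set({'weight': [0.5 + i for i in range(len(conns))]})  # per connection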
""" - nestkernel.llapi_disconnect_syncoll(self._datum) + sps(self._datum) + sr('Disconnect_a') class CollocatedSynapses: @@ -930,19 +918,18 @@ class CollocatedSynapses: Example ------- - :: - - nodes = nest.Create('iaf_psc_alpha', 3) - syn_spec = nest.CollocatedSynapses({'weight': 4., 'delay': 1.5}, - {'synapse_model': 'stdp_synapse'}, - {'synapse_model': 'stdp_synapse', 'alpha': 3.}) - nest.Connect(nodes, nodes, conn_spec='one_to_one', syn_spec=syn_spec) + :: - conns = nest.GetConnections() + nodes = nest.Create('iaf_psc_alpha', 3) + syn_spec = nest.CollocatedSynapses({'weight': 4., 'delay': 1.5}, + {'synapse_model': 'stdp_synapse'}, + {'synapse_model': 'stdp_synapse', 'alpha': 3.}) + nest.Connect(nodes, nodes, conn_spec='one_to_one', syn_spec=syn_spec) - print(conns.alpha) - print(len(syn_spec)) + conns = nest.GetConnections() + print(conns.alpha) + print(len(syn_spec)) """ def __init__(self, *args): @@ -965,27 +952,26 @@ class Mask: _datum = None # The constructor should not be called by the user - def __init__(self, data): + def __init__(self, datum): """Masks must be created using the CreateMask command.""" - if not isinstance(data, nestkernel.MaskObject): - raise TypeError("Expected MaskObject.") - self._datum = data + if not isinstance(datum, kernel.SLIDatum) or datum.dtype != "masktype": + raise TypeError("expected mask Datum") + self._datum = datum - # TODO-PYNEST-NG: Convert operators # Generic binary operation - def _binop(self, op, rhs): - if not isinstance(rhs, Mask): + def _binop(self, op, other): + if not isinstance(other, Mask): raise NotImplementedError() - return sli_func(op, self._datum, rhs._datum) + return sli_func(op, self._datum, other._datum) - def __or__(self, rhs): - return self._binop("or", rhs) + def __or__(self, other): + return self._binop("or", other) - def __and__(self, rhs): - return self._binop("and", rhs) + def __and__(self, other): + return self._binop("and", other) - def __sub__(self, rhs): - return self._binop("sub", rhs) + def __sub__(self, other): + return self._binop("sub", other) def Inside(self, point): """ @@ -1001,7 +987,7 @@ def Inside(self, point): out : bool True if the point is inside the mask, False otherwise """ - return nestkernel.llapi_inside_mask(point, self._datum) + return sli_func("Inside", point, self._datum) # TODO-PYNEST-NG: We may consider moving the entire (or most of) Parameter class to the cython level. @@ -1020,9 +1006,8 @@ class Parameter: def __init__(self, datum): """Parameters must be created using the CreateParameter command.""" if not isinstance(datum, nestkernel.ParameterObject): - raise TypeError( - "expected low-level parameter object;" " use the CreateParameter() function to create a Parameter" - ) + raise TypeError("expected low-level parameter object;" + " use the CreateParameter() function to create a Parameter") self._datum = datum def _arg_as_parameter(self, arg): @@ -1030,23 +1015,20 @@ def _arg_as_parameter(self, arg): return arg if isinstance(arg, (int, float)): # Value for the constant parameter must be float. 
- return CreateParameter("constant", {"value": float(arg)}) + return CreateParameter('constant', {'value': float(arg)}) raise NotImplementedError() def __add__(self, other): return nestkernel.llapi_add_parameter(self._datum, self._arg_as_parameter(other)._datum) - def __radd__(self, lhs): - return self + lhs + def __radd__(self, other): + return self + other def __sub__(self, other): return nestkernel.llapi_subtract_parameter(self._datum, self._arg_as_parameter(other)._datum) - def __rsub__(self, lhs): - return self * (-1) + lhs - - def __pos__(self): - return self + def __rsub__(self, other): + return self * (-1) + other def __neg__(self): return self * (-1) @@ -1054,8 +1036,8 @@ def __neg__(self): def __mul__(self, other): return nestkernel.llapi_multiply_parameter(self._datum, self._arg_as_parameter(other)._datum) - def __rmul__(self, lhs): - return self * lhs + def __rmul__(self, other): + return self * other def __div__(self, other): return nestkernel.llapi_divide_parameter(self._datum, self._arg_as_parameter(other)._datum) @@ -1064,25 +1046,25 @@ def __truediv__(self, other): return nestkernel.llapi_divide_parameter(self._datum, self._arg_as_parameter(other)._datum) def __pow__(self, exponent): - return nestkernel.llapi_pow_parameter(self._datum, float(exponent)) + return nestkernel.llapi_pow_parameter(self._datum, self._arg_as_parameter(float(exponent))._datum) def __lt__(self, other): - return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {"comparator": 0}) + return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {'comparator': 0}) def __le__(self, other): - return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {"comparator": 1}) + return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {'comparator': 1}) def __eq__(self, other): - return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {"comparator": 2}) + return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {'comparator': 2}) def __ne__(self, other): - return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {"comparator": 3}) + return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {'comparator': 3}) def __ge__(self, other): - return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {"comparator": 4}) + return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {'comparator': 4}) def __gt__(self, other): - return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {"comparator": 5}) + return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {'comparator': 5}) def GetValue(self): """ @@ -1119,25 +1101,24 @@ def apply(self, spatial_nc, positions=None): return nestkernel.llapi_apply_parameter(self._datum, spatial_nc) else: if len(spatial_nc) != 1: - raise ValueError("The NodeCollection must contain a single node ID only") + raise ValueError('The NodeCollection must contain a single node ID only') if not isinstance(positions, (list, tuple)): - raise TypeError("Positions must be a list or tuple of positions") + raise TypeError('Positions must be a list or tuple of positions') for pos in positions: if not isinstance(pos, (list, tuple, numpy.ndarray)): - raise TypeError("Each position must be a list 
or tuple") + raise TypeError('Each position must be a list or tuple') if len(pos) != len(positions[0]): - raise ValueError("All positions must have the same number of dimensions") - return nestkernel.llapi_apply_parameter(self._datum, {"source": spatial_nc, "targets": positions}) + raise ValueError('All positions must have the same number of dimensions') + return nestkernel.llapi_apply_parameter(self._datum, {'source': spatial_nc, 'targets': positions}) class CmBase: + def __init__(self, node_collection, elements): if not isinstance(node_collection, NodeCollection): - raise TypeError(f"node_collection must be a NodeCollection, got {type(node_collection)}") - if isinstance(elements, list): - elements = tuple(elements) + raise TypeError(f'node_collection must be a NodeCollection, got {type(node_collection)}') if not isinstance(elements, tuple): - raise TypeError(f"elements must be a tuple of dicts, got {type(elements)}") + raise TypeError(f'elements must be a tuple of dicts, got {type(elements)}') self._elements = elements self._node_collection = node_collection @@ -1148,17 +1129,14 @@ def __add__(self, other): elif isinstance(other, (tuple, list)): if not all(isinstance(d, dict) for d in other): raise TypeError( - f"{self.__class__.__name__} can only be added with dicts, lists of dicts, " - f"or other {self.__class__.__name__}" - ) + f'{self.__class__.__name__} can only be added with dicts, lists of dicts, ' + f'or other {self.__class__.__name__}') new_elements += list(other) elif isinstance(other, self.__class__): new_elements += list(other._elements) else: - raise NotImplementedError( - f"{self.__class__.__name__} can only be added with dicts, lists of dicts," - f" or other {self.__class__.__name__}, got {type(other)}" - ) + raise NotImplementedError(f'{self.__class__.__name__} can only be added with dicts, lists of dicts,' + f' or other {self.__class__.__name__}, got {type(other)}') return self.__class__(self._node_collection, tuple(new_elements)) @@ -1167,19 +1145,15 @@ def __iadd__(self, other): new_elements = [other] elif isinstance(other, (tuple, list)): if not all(isinstance(d, dict) for d in other): - raise TypeError( - f"{self.__class__.__name__} can only be added with dicts, lists of dicts, " - f"or other {self.__class__.__name__}" - ) + raise TypeError(f'{self.__class__.__name__} can only be added with dicts, lists of dicts, ' + f'or other {self.__class__.__name__}') new_elements = list(other) elif isinstance(other, self.__class__): new_elements = list(other._elements) else: - raise NotImplementedError( - f"{self.__class__.__name__} can only be added with dicts, lists of dicts," - f" or other {self.__class__.__name__}, got {type(other)}" - ) - self._node_collection.set({f"add_{self.__class__.__name__.lower()}": new_elements}) + raise NotImplementedError(f'{self.__class__.__name__} can only be added with dicts, lists of dicts,' + f' or other {self.__class__.__name__}, got {type(other)}') + self._node_collection.set({f'add_{self.__class__.__name__.lower()}': new_elements}) return None # Flagging elements as added by returning None def __getitem__(self, key): @@ -1202,8 +1176,8 @@ class Receptors(CmBase): pass -def serialize_data(data): - """Serialize data for JSON. +def serializable(data): + """Make data serializable for JSON. 
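A sketch of how the Parameter machinery above is typically driven; the two-argument form of ``apply`` evaluates against explicit positions and is shown here with the origin as a hypothetical target::

    import nest

    layer = nest.Create('iaf_psc_alpha', 9,
                        positions=nest.spatial.grid(shape=[3, 3]))

    x = nest.spatial.pos.x
    print(x.apply(layer))                                  # x coordinate per node
    print(nest.spatial.distance.apply(layer[0], [[0.0, 0.0]]))

    p = 2.0 * x + nest.random.uniform()    # arithmetic operators compose Parameters
    near = nest.spatial.distance < 0.5     # comparisons yield Parameters too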
Parameters ---------- @@ -1217,7 +1191,7 @@ def serialize_data(data): if isinstance(data, (numpy.ndarray, NodeCollection)): return data.tolist() - elif isinstance(data, SynapseCollection): + if isinstance(data, SynapseCollection): # Get full information from SynapseCollection return serializable(data.get()) if isinstance(data, (list, tuple)): @@ -1228,7 +1202,7 @@ def serialize_data(data): def to_json(data, **kwargs): - """Convert the object to a JSON string. + """Serialize data to JSON. Parameters ---------- @@ -1239,9 +1213,9 @@ def to_json(data, **kwargs): Returns ------- data_json : str - JSON string format of the data + JSON format of the data """ - data_serialized = serialize_data(data) + data_serialized = serializable(data) data_json = json.dumps(data_serialized, **kwargs) return data_json diff --git a/pynest/nest/logic/_hl_api_logic.py b/pynest/nest/logic/_hl_api_logic.py deleted file mode 100644 index 70d194ad8b..0000000000 --- a/pynest/nest/logic/_hl_api_logic.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- -# -# _hl_api_logic.py -# -# This file is part of NEST. -# -# Copyright (C) 2004 The NEST Initiative -# -# NEST is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 2 of the License, or -# (at your option) any later version. -# -# NEST is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with NEST. If not, see . - -from ..lib._hl_api_types import CreateParameter -from .. import nestkernel_api as nestkernel - -__all__ = [ - 'conditional', -] - - -def conditional(condition, param_if_true, param_if_false): - """ - Yields one value or another, based on the condition. - - Parameters - ---------- - condition : Parameter - A comparing Parameter, created with the usual comparators. - param_if_true : [Parameter | float] - Value or Parameter used to get a value used if the condition evaluates to true. - param_if_false : [Parameter | float] - Value or Parameter used to get a value used if the condition evaluates to false. - - Returns - ------- - Parameter: - Object representing the conditional. - """ - if isinstance(param_if_true, (int, float)): - param_if_true = CreateParameter( - 'constant', {'value': float(param_if_true)}) - if isinstance(param_if_false, (int, float)): - param_if_false = CreateParameter( - 'constant', {'value': float(param_if_false)}) - return nestkernel.llapi_conditional_parameter(condition._datum, param_if_true._datum, param_if_false._datum) diff --git a/pynest/nest/logic/hl_api_logic.py b/pynest/nest/logic/hl_api_logic.py index 210816b0cc..eafa67642c 100644 --- a/pynest/nest/logic/hl_api_logic.py +++ b/pynest/nest/logic/hl_api_logic.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# hl_api_logic.py +# _hl_api_logic.py # # This file is part of NEST. # @@ -19,11 +19,11 @@ # You should have received a copy of the GNU General Public License # along with NEST. If not, see . -from .. import nestkernel_api as nestkernel from ..lib.hl_api_types import CreateParameter +from .. import nestkernel_api as nestkernel __all__ = [ - "conditional", + 'conditional', ] @@ -46,7 +46,9 @@ def conditional(condition, param_if_true, param_if_false): Object representing the conditional. 
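A sketch of ``conditional`` in context, combined with the comparison operators defined on Parameter; the probability and weight values are illustrative::

    import nest

    layer = nest.Create('iaf_psc_alpha', 25,
                        positions=nest.spatial.grid(shape=[5, 5]))
    weight = nest.logic.conditional(nest.spatial.distance < 0.3, 2.0, 0.5)
    nest.Connect(layer, layer,
                 {'rule': 'pairwise_bernoulli', 'p': 1.0},
                 {'weight': weight})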
""" if isinstance(param_if_true, (int, float)): - param_if_true = CreateParameter("constant", {"value": float(param_if_true)}) + param_if_true = CreateParameter( + 'constant', {'value': float(param_if_true)}) if isinstance(param_if_false, (int, float)): - param_if_false = CreateParameter("constant", {"value": float(param_if_false)}) + param_if_false = CreateParameter( + 'constant', {'value': float(param_if_false)}) return nestkernel.llapi_conditional_parameter(condition._datum, param_if_true._datum, param_if_false._datum) diff --git a/pynest/nest/math/_hl_api_math.py b/pynest/nest/math/_hl_api_math.py deleted file mode 100644 index 8397e4aa90..0000000000 --- a/pynest/nest/math/_hl_api_math.py +++ /dev/null @@ -1,146 +0,0 @@ -# -*- coding: utf-8 -*- -# -# _hl_api_math.py -# -# This file is part of NEST. -# -# Copyright (C) 2004 The NEST Initiative -# -# NEST is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 2 of the License, or -# (at your option) any later version. -# -# NEST is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with NEST. If not, see . - -from .. import nestkernel_api as nestkernel - - -__all__ = [ - 'exp', - 'sin', - 'cos', - 'min', - 'max', - 'redraw', -] - -# TODO: Special cases when argument is a number? - - -def exp(parameter): - """ - Calculate the exponential of the parameter - - Parameters - ---------- - parameter : Parameter - Input Parameter. - - Returns - ------- - Parameter: - Object representing the exponential of the parameter. - """ - return nestkernel.llapi_exp_parameter(parameter._datum) - - -def sin(parameter): - """ - Calculate the sine of the parameter - - Parameters - ---------- - parameter : Parameter - Input Parameter. - - Returns - ------- - Parameter: - Object representing the sine of the parameter. - """ - return nestkernel.llapi_sin_parameter(parameter._datum) - - -def cos(parameter): - """ - Calculate the cosine of the parameter - - Parameters - ---------- - parameter : Parameter - Input Parameter. - - Returns - ------- - Parameter: - Object representing the cosine of the parameter. - """ - return nestkernel.llapi_cos_parameter(parameter._datum) - - -def min(parameter, value): - """ - Yields the smallest value of the value of a parameter and a given value - - Parameters - ---------- - parameter : Parameter - Input Parameter. - value : float - Value to compare against. - - Returns - ------- - Parameter: - Object yielding the smallest value. - """ - return nestkernel.llapi_min_parameter(parameter._datum, float(value)) - - -def max(parameter, value): - """ - Yields the largest value of the value of a parameter and a given value - - Parameters - ---------- - parameter : Parameter - Input Parameter. - value : float - Value to compare against. - - Returns - ------- - Parameter: - Object yielding the largest value. - """ - return nestkernel.llapi_max_parameter(parameter._datum, float(value)) - - -def redraw(parameter, min, max): - """ - Redraws the value of the parameter if it is outside of the given limits - - Both min and max values are included in the limit. If the number of redraws exceeds 1000, an error is thrown. 
- - Parameters - ---------- - parameter : Parameter - Input Parameter. - min : float - Lower bound of the value. - max : float - Upper bound of the value. - - Returns - ------- - Parameter: - Object redrawing the parameter until it can yield a value within the given limits. - """ - return nestkernel.llapi_redraw_parameter(parameter._datum, float(min), float(max)) diff --git a/pynest/nest/plot/_raster_plot.py b/pynest/nest/plot/_raster_plot.py deleted file mode 100644 index 0264025f0b..0000000000 --- a/pynest/nest/plot/_raster_plot.py +++ /dev/null @@ -1,373 +0,0 @@ -# -*- coding: utf-8 -*- -# -# _raster_plot.py -# -# This file is part of NEST. -# -# Copyright (C) 2004 The NEST Initiative -# -# NEST is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 2 of the License, or -# (at your option) any later version. -# -# NEST is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with NEST. If not, see . - -""" Functions for raster plotting.""" -import nest -import numpy as _np -import numpy.typing as _npt -import functools as _functools -import typing as _typing - -__all__ = [ - "extract_events", - "raster_plot", -] - - -def extract_events(data, time=None, sel=None): - """Extract all events within a given time interval. - - Both time and sel may be used at the same time such that all - events are extracted for which both conditions are true. - - Parameters - ---------- - data : list - Matrix such that - data[:,0] is a vector of all node_ids and - data[:,1] a vector with the corresponding time stamps. - time : list, optional - List with at most two entries such that - time=[t_max] extracts all events with t< t_max - time=[t_min, t_max] extracts all events with t_min <= t < t_max - sel : list, optional - List of node_ids such that - sel=[node_id1, ... , node_idn] extracts all events from these node_ids. - All others are discarded. - - Returns - ------- - numpy.array - List of events as (node_id, t) tuples - """ - val = [] - - if time: - t_max = time[-1] - if len(time) > 1: - t_min = time[0] - else: - t_min = 0 - - for v in data: - t = v[1] - node_id = v[0] - if time and (t < t_min or t >= t_max): - continue - if not sel or node_id in sel: - val.append(v) - - return _np.array(val) - - -@_functools.singledispatch -def raster_plot( - data: _typing.Any = None, - /, - files=None, - **kwargs, -): - raise TypeError("The first argument must be either a filename or data") - - -@raster_plot.register -def _raster_plot_from_data( - data: _npt.NDArray, - sel=None, - **kwargs, -): - """Plot raster plot from data array. - - Parameters - ---------- - data : list - Matrix such that - data[:,0] is a vector of all node_ids and - data[:,1] a vector with the corresponding time stamps. - sel : list, optional - List of node_ids such that - sel=[node_id1, ... , node_idn] extracts all events from these node_ids. - All others are discarded. 
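A sketch composing the helpers above with random and spatial Parameters (all constants illustrative)::

    import nest

    x = nest.random.normal(mean=0.0, std=1.0)

    bounded = nest.math.redraw(x, min=-2.0, max=2.0)   # resample until in [-2, 2]
    capped = nest.math.min(nest.math.exp(x), 10.0)     # exp(x), clipped at 10
    wave = nest.math.sin(3.14 * nest.spatial.pos.x)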
- kwargs: - Parameters passed to _make_plot - """ - if len(data) == 0: - raise Exception("No data to plot.") - ts = data[:, 1] - d = extract_events(data, sel=sel) - ts1 = d[:, 1] - node_ids = d[:, 0] - - return _make_plot(ts, ts1, node_ids, data[:, 0], **kwargs) - - -@raster_plot.register -def _raster_plot_from_file(fnames: _typing.Union[str, _typing.Iterable[str]], **kwargs): - """Plot raster from file. - - Parameters - ---------- - fnames : str or tuple(str) or list(str) - File name or list of file names - - If a list of files is given, the data from them is concatenated as if - it had been stored in a single file - useful when MPI is enabled and - data is logged separately for each MPI rank, for example. - kwargs: - Parameters passed to _make_plot - """ - if isinstance(fnames, str): - fnames = [fnames] - try: - import pandas - except ImportError: - _from_file_numpy(fnames, **kwargs) - else: - _from_file_pandas(fnames, **kwargs) - - -def _from_file_pandas(fname, **kwargs): - """Use pandas.""" - import pandas - - data = None - for f in fname: - dataFrame = pandas.read_table(f, header=2, skipinitialspace=True) - newdata = dataFrame.values - - if data is None: - data = newdata - else: - data = _np.concatenate((data, newdata)) - - return _raster_plot_from_data(data, **kwargs) - - -def _from_file_numpy(fname, **kwargs): - """Use numpy.""" - data = None - for f in fname: - newdata = _np.loadtxt(f, skiprows=3) - - if data is None: - data = newdata - else: - data = _np.concatenate((data, newdata)) - - return _raster_plot_from_data(data, **kwargs) - - -def _raster_plot_from_device(detec, **kwargs): - """ - Plot raster from a spike recorder. - - Parameters - ---------- - detec : TYPE - Description - kwargs: - Parameters passed to _make_plot - - Raises - ------ - nest.kernel.NESTError - """ - - type_id = nest.GetDefaults(detec.get("model"), "type_id") - if not type_id == "spike_recorder": - raise nest.kernel.NESTError("Please provide a spike_recorder.") - - if detec.get("record_to") == "memory": - - ts, node_ids = _from_memory(detec) - - if not len(ts): - raise nest.kernel.NESTError("No events recorded!") - - if "title" not in kwargs: - kwargs["title"] = "Raster plot from device '%i'" % detec.get("global_id") - - if detec.get("time_in_steps"): - xlabel = "Steps" - else: - xlabel = "Time (ms)" - - return _make_plot(ts, ts, node_ids, node_ids, xlabel=xlabel, **kwargs) - - elif detec.get("record_to") == "ascii": - fname = detec.get("filenames") - return from_file(fname, **kwargs) - - else: - raise nest.kernel.NESTError( - "No data to plot. Make sure that \ - record_to is set to either 'ascii' or 'memory'." - ) - - -def _from_memory(detec): - ev = detec.get("events") - return ev["times"], ev["senders"] - - -def _make_plot( - ts, - ts1, - node_ids, - neurons, - hist=True, - hist_binwidth=5.0, - grayscale=False, - title=None, - xlabel=None, -): - """Generic plotting routine. - - Constructs a raster plot along with an optional histogram (common part in - all routines above). 
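A sketch of the dispatch and filtering described above; the file name is hypothetical, and the array columns are node_id and spike time::

    import numpy as np

    data = np.array([[1, 10.0], [2, 10.5], [1, 12.0], [3, 13.0]])

    extract_events(data, time=[12.0])        # all events with t < 12.0
    extract_events(data, time=[10.4, 12.5])  # 10.4 <= t < 12.5
    extract_events(data, sel=[2])            # only node 2

    raster_plot(data, hist=False)            # ndarray overload
    raster_plot('spike_recorder-102-0.dat')  # filename overload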
- - Parameters - ---------- - ts : list - All timestamps - ts1 : list - Timestamps corresponding to node_ids - node_ids : list - Global ids corresponding to ts1 - neurons : list - Node IDs of neurons to plot - hist : bool, optional - Display histogram - hist_binwidth : float, optional - Width of histogram bins - grayscale : bool, optional - Plot in grayscale - title : str, optional - Plot title - xlabel : str, optional - Label for x-axis - """ - import matplotlib.pyplot as plt - - plt.figure() - - if grayscale: - color_marker = ".k" - color_bar = "gray" - else: - color_marker = "." - color_bar = "blue" - - color_edge = "black" - - if xlabel is None: - xlabel = "Time (ms)" - - ylabel = "Neuron ID" - - if hist: - ax1 = plt.axes([0.1, 0.3, 0.85, 0.6]) - plotid = plt.plot(ts1, node_ids, color_marker) - plt.ylabel(ylabel) - plt.xticks([]) - xlim = plt.xlim() - - plt.axes([0.1, 0.1, 0.85, 0.17]) - t_bins = _np.arange(_np.amin(ts), _np.amax(ts), float(hist_binwidth)) - n, _ = _histogram(ts, bins=t_bins) - num_neurons = len(_np.unique(neurons)) - heights = 1000 * n / (hist_binwidth * num_neurons) - - plt.bar( - t_bins, heights, width=hist_binwidth, color=color_bar, edgecolor=color_edge - ) - plt.yticks([int(x) for x in _np.linspace(0.0, int(max(heights) * 1.1) + 5, 4)]) - plt.ylabel("Rate (Hz)") - plt.xlabel(xlabel) - plt.xlim(xlim) - plt.axes(ax1) - else: - plotid = plt.plot(ts1, node_ids, color_marker) - plt.xlabel(xlabel) - plt.ylabel(ylabel) - - if title is None: - plt.title("Raster plot") - else: - plt.title(title) - - plt.draw() - - return plotid - - -def _histogram(a, bins=10, bin_range=None, normed=False): - """Calculate histogram for data. - - Parameters - ---------- - a : list - Data to calculate histogram for - bins : int, optional - Number of bins - bin_range : TYPE, optional - Range of bins - normed : bool, optional - Whether distribution should be normalized - - Raises - ------ - ValueError - """ - from numpy import asarray, iterable, linspace, sort, concatenate - - a = asarray(a).ravel() - - if bin_range is not None: - mn, mx = bin_range - if mn > mx: - raise ValueError("max must be larger than min in range parameter") - - if not iterable(bins): - if bin_range is None: - bin_range = (a.min(), a.max()) - mn, mx = [mi + 0.0 for mi in bin_range] - if mn == mx: - mn -= 0.5 - mx += 0.5 - bins = linspace(mn, mx, bins, endpoint=False) - else: - if (bins[1:] - bins[:-1] < 0).any(): - raise ValueError("bins must increase monotonically") - - # best block size probably depends on processor cache size - block = 65536 - n = sort(a[:block]).searchsorted(bins) - for i in range(block, a.size, block): - n += sort(a[i : i + block]).searchsorted(bins) - n = concatenate([n, [len(a)]]) - n = n[1:] - n[:-1] - - if normed: - db = bins[1] - bins[0] - return 1.0 / (a.size * db) * n, bins - else: - return n, bins diff --git a/pynest/nest/plot/_visualization.py b/pynest/nest/plot/_visualization.py deleted file mode 100644 index 5e2df8bcb5..0000000000 --- a/pynest/nest/plot/_visualization.py +++ /dev/null @@ -1,79 +0,0 @@ -# -*- coding: utf-8 -*- -# -# _visualization.py -# -# This file is part of NEST. -# -# Copyright (C) 2004 The NEST Initiative -# -# NEST is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 2 of the License, or -# (at your option) any later version. 
-# -# NEST is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with NEST. If not, see . - -""" -Functions to visualize a network built in NEST. -""" - -import pydot -import nest - - -def plot_network(nodes, filename, ext_conns=False, plot_modelnames=False): - """Plot the given nodes and the connections that originate from - them. - - This function depends on the availability of the pydot module. - - Simplified version for NEST 3. - - Parameters - ---------- - nodes : NodeCollection - NodeCollection containing node IDs of nodes to plot - filename : str - Filename to save the plot to. Can end either in .pdf or .png to - determine the type of the output. - ext_conns : bool, optional - Draw connections to targets that are not in nodes. If it is True, - these are drawn to a node named 'ext'. - plot_modelnames : bool, optional - Description - - Raises - ------ - nest.kernel.NESTError - """ - - if len(nodes) == 0: - nest.kernel.NESTError("nodes must at least contain one node") - - if not isinstance(nodes, nest.NodeCollection): - raise nest.kernel.NESTError("nodes must be a NodeCollection") - - if ext_conns: - raise NotImplementedError("ext_conns") - if plot_modelnames: - raise NotImplementedError("plot_modelnames") - - conns = nest.GetConnections(nodes) - - graph = pydot.Dot(rankdir="LR", ranksep="5") - for source, target in zip(conns.sources(), conns.targets()): - graph.add_edge(pydot.Edge(str(source), str(target))) - - filetype = filename.rsplit(".", 1)[1] - if filetype == "pdf": - graph.write_pdf(filename) - elif filetype == "png": - graph.write_png(filename) - else: - raise nest.kernel.NESTError("Filename must end in '.png' or '.pdf'.") diff --git a/pynest/nest/plot/_voltage_trace.py b/pynest/nest/plot/_voltage_trace.py deleted file mode 100644 index 5c3eb7c802..0000000000 --- a/pynest/nest/plot/_voltage_trace.py +++ /dev/null @@ -1,264 +0,0 @@ -# -*- coding: utf-8 -*- -# -# _voltage_trace.py -# -# This file is part of NEST. -# -# Copyright (C) 2004 The NEST Initiative -# -# NEST is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 2 of the License, or -# (at your option) any later version. -# -# NEST is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with NEST. If not, see . - -""" -Functions to plot voltage traces. -""" - -import nest -import numpy - -__all__ = [ - "from_device", - "from_file", -] - -def from_file(fname, title=None, grayscale=False): - """Plot voltage trace from file. 
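A sketch of ``plot_network`` as documented above; it needs pydot installed, and the output type is picked from the file extension::

    import nest

    nrns = nest.Create('iaf_psc_alpha', 5)
    nest.Connect(nrns, nrns, {'rule': 'fixed_indegree', 'indegree': 2})

    plot_network(nrns, 'network.png')   # a .pdf target also works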
- - Parameters - ---------- - fname : str or list - Filename or list of filenames to load from - title : str, optional - Plot title - grayscale : bool, optional - Plot in grayscale - - Raises - ------ - ValueError - """ - import matplotlib.pyplot as plt - - if isinstance(fname, (list, tuple)): - data = None - for file in fname: - if data is None: - data = numpy.loadtxt(file) - else: - data = numpy.concatenate((data, numpy.loadtxt(file))) - else: - data = numpy.loadtxt(fname) - - if grayscale: - line_style = "k" - else: - line_style = "" - - if len(data.shape) == 1: - print( - "INFO: only found 1 column in the file. \ - Assuming that only one neuron was recorded." - ) - plotid = plt.plot(data, line_style) - plt.xlabel("Time (steps of length interval)") - - elif data.shape[1] == 2: - print( - "INFO: found 2 columns in the file. Assuming \ - them to be node ID, pot." - ) - - plotid = [] - data_dict = {} - for dat in data: - if not dat[0] in data_dict: - data_dict[dat[0]] = [dat[1]] - else: - data_dict[dat[0]].append(dat[1]) - - for dat in data_dict: - plotid.append(plt.plot(data_dict[dat], line_style, label="Neuron %i" % dat)) - - plt.xlabel("Time (steps of length interval)") - plt.legend() - - elif data.shape[1] == 3: - plotid = [] - data_dict = {} - g = data[0][0] - t = [] - for d in data: - if not d[0] in data_dict: - data_dict[d[0]] = [d[2]] - else: - data_dict[d[0]].append(d[2]) - if d[0] == g: - t.append(d[1]) - - for d in data_dict: - plotid.append(plt.plot(t, data_dict[d], line_style, label="Neuron %i" % d)) - - plt.xlabel("Time (ms)") - plt.legend() - - else: - raise ValueError("Inappropriate data shape %i!" % data.shape) - - if not title: - title = "Membrane potential from file '%s'" % fname - - plt.title(title) - plt.ylabel("Membrane potential (mV)") - plt.draw() - - return plotid - - -def from_device(detec, neurons=None, title=None, grayscale=False, timeunit="ms"): - """Plot the membrane potential of a set of neurons recorded by - the given voltmeter or multimeter. - - Parameters - ---------- - detec : list - Global id of voltmeter or multimeter in a list, e.g. [1] - neurons : list, optional - Indices of of neurons to plot - title : str, optional - Plot title - grayscale : bool, optional - Plot in grayscale - timeunit : str, optional - Unit of time - - Raises - ------ - nest.NESTError - Description - """ - import matplotlib.pyplot as plt - - if len(detec) > 1: - raise nest.NESTError("Please provide a single voltmeter.") - - type_id = nest.GetDefaults(detec.get("model"), "type_id") - if type_id not in ("voltmeter", "multimeter"): - raise nest.NESTError( - "Please provide a voltmeter or a \ - multimeter measuring V_m." - ) - elif type_id == "multimeter": - if "V_m" not in detec.get("record_from"): - raise nest.NESTError( - "Please provide a multimeter \ - measuring V_m." - ) - elif not detec.get("record_to") == "memory" and len(detec.get("record_from")) > 1: - raise nest.NESTError( - "Please provide a multimeter \ - measuring only V_m or record to memory!" 
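A sketch of the memory-recording path handled below, with an illustrative Poisson drive so the recorded trace is non-trivial::

    import nest

    nrn = nest.Create('iaf_psc_alpha')
    pg = nest.Create('poisson_generator', params={'rate': 8000.0})
    vm = nest.Create('voltmeter')

    nest.Connect(pg, nrn)
    nest.Connect(vm, nrn)
    nest.Simulate(200.0)

    from_device(vm, title='Membrane potential of one neuron')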
- ) - - if detec.get("record_to") == "memory": - timefactor = 1.0 - if not detec.get("time_in_steps"): - if timeunit == "s": - timefactor = 1000.0 - else: - timeunit = "ms" - - times, voltages = _from_memory(detec) - - if not len(times): - raise nest.NESTError("No events recorded!") - - if neurons is None: - neurons = voltages.keys() - - plotids = [] - for neuron in neurons: - time_values = numpy.array(times[neuron]) / timefactor - - if grayscale: - line_style = "k" - else: - line_style = "" - - try: - plotids.append(plt.plot(time_values, voltages[neuron], line_style, label="Neuron %i" % neuron)) - except KeyError: - print("INFO: Wrong ID: {0}".format(neuron)) - - if not title: - title = "Membrane potential" - plt.title(title) - - plt.ylabel("Membrane potential (mV)") - - if detec.time_in_steps: - plt.xlabel("Steps") - else: - plt.xlabel("Time (%s)" % timeunit) - - plt.legend(loc="best") - plt.draw() - - return plotids - - elif detec.get("record_to") == "ascii": - fname = detec.get("filenames") - return from_file(fname, title, grayscale) - else: - raise nest.NESTError("Provided devices neither record to ascii file, nor to memory.") - - -def _from_memory(detec): - """Get voltage traces from memory. - ---------- - detec : list - Global id of voltmeter or multimeter - """ - import array - - ev = detec.get("events") - potentials = ev["V_m"] - senders = ev["senders"] - - v = {} - t = {} - - if "times" in ev: - times = ev["times"] - for s, currentsender in enumerate(senders): - if currentsender not in v: - v[currentsender] = array.array("f") - t[currentsender] = array.array("f") - - v[currentsender].append(float(potentials[s])) - t[currentsender].append(float(times[s])) - else: - # reconstruct the time vector, if not stored explicitly - origin = detec.get("origin") - start = detec.get("start") - interval = detec.get("interval") - senders_uniq = numpy.unique(senders) - num_intvls = len(senders) / len(senders_uniq) - times_s = origin + start + interval + interval * numpy.array(range(num_intvls)) - - for s, currentsender in enumerate(senders): - if currentsender not in v: - v[currentsender] = array.array("f") - t[currentsender] = times_s - v[currentsender].append(float(potentials[s])) - - return t, v diff --git a/pynest/nest/random/_hl_api_random.py b/pynest/nest/random/_hl_api_random.py deleted file mode 100644 index 5a279a85a0..0000000000 --- a/pynest/nest/random/_hl_api_random.py +++ /dev/null @@ -1,130 +0,0 @@ -# -*- coding: utf-8 -*- -# -# _hl_api_random.py -# -# This file is part of NEST. -# -# Copyright (C) 2004 The NEST Initiative -# -# NEST is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 2 of the License, or -# (at your option) any later version. -# -# NEST is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with NEST. If not, see . - -from ..lib._hl_api_types import CreateParameter - -__all__ = [ - 'exponential', - 'lognormal', - 'normal', - 'uniform', - 'uniform_int', -] - - -def uniform(min=0.0, max=1.0): - """ - Draws samples from a uniform distribution. - - Samples are distributed uniformly in [min, max) (includes min, but excludes max). 
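The distribution factories in this module return Parameter objects that can be passed wherever a value is expected; a sketch with illustrative constants::

    import nest

    nrns = nest.Create('iaf_psc_alpha', 10,
                       params={'V_m': nest.random.normal(mean=-65.0, std=2.0)})
    nest.Connect(nrns, nrns,
                 syn_spec={'weight': nest.random.uniform(min=0.5, max=1.5),
                           'delay': 1.0})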
- - Note - ---- - See :ref:`this documentation ` for details on the effect - of time discretization on delays drawn from a uniform distribution. - - Parameters - ---------- - min : float, optional - Lower boundary of the sample interval. Default value is 0. - max : float, optional - Upper boundary of the sample interval. Default value is 1.0. - - Returns - ------- - Parameter: - Object yielding values drawn from the distribution. - """ - return CreateParameter('uniform', {'min': min, 'max': max}) - - -def uniform_int(max): - """ - Draws integer samples from a uniform distribution. - - Samples are distributed uniformly in [0, max) (includes 0, but excludes max). - - Parameters - ---------- - max : integer - Upper boundary of the sample interval. - - Returns - ------- - Parameter: - Object yielding values drawn from the distribution. - """ - return CreateParameter('uniform_int', {'max': max}) - - -def normal(mean=0.0, std=1.0): - """ - Draws samples from a normal distribution. - - Parameters - ---------- - mean : float, optional - Mean of the distribution. Default value is 0. - std : float, optional - Standard deviation of the distribution. Default value is 1.0. - - Returns - ------- - Parameter: - Object yielding values drawn from the distribution. - """ - return CreateParameter('normal', {'mean': mean, 'std': std}) - - -def exponential(beta=1.0): - """ - Draws samples from an exponential distribution. - - Parameters - ---------- - beta : float, optional - Scale parameter the distribution. Default value is 1.0. - - Returns - ------- - Parameter: - Object yielding values drawn from the distribution. - """ - return CreateParameter('exponential', {'beta': beta}) - - -def lognormal(mean=0.0, std=1.0): - """ - Draws samples from a log-normal distribution. - - Parameters - ---------- - mean : float, optional - Mean value of the underlying normal distribution. Default value is 0. - std : float, optional - Standard deviation of the underlying normal distribution. Default value is 1.0. - - Returns - ------- - Parameter: - Object yielding values drawn from the distribution. - """ - return CreateParameter('lognormal', {'mean': mean, 'std': std}) diff --git a/pynest/nest/server/hl_api_server.py b/pynest/nest/server/2hl_api_server.py similarity index 100% rename from pynest/nest/server/hl_api_server.py rename to pynest/nest/server/2hl_api_server.py diff --git a/pynest/nest/spatial/hl_api_spatial.py b/pynest/nest/spatial/hl_api_spatial2.py similarity index 100% rename from pynest/nest/spatial/hl_api_spatial.py rename to pynest/nest/spatial/hl_api_spatial2.py diff --git a/pynest/nest/spatial_distributions/_hl_api_spatial_distributions.py b/pynest/nest/spatial_distributions/_hl_api_spatial_distributions.py index 96dd7835ef..9ed64268ee 100644 --- a/pynest/nest/spatial_distributions/_hl_api_spatial_distributions.py +++ b/pynest/nest/spatial_distributions/_hl_api_spatial_distributions.py @@ -1,148 +1,148 @@ -# -*- coding: utf-8 -*- -# -# _hl_api_spatial_distributions.py -# -# This file is part of NEST. -# -# Copyright (C) 2004 The NEST Initiative -# -# NEST is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 2 of the License, or -# (at your option) any later version. -# -# NEST is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with NEST. If not, see . - -from ..math import exp -from ..lib._hl_api_types import CreateParameter - -try: - import scipy.special - HAVE_SCIPY = True -except ImportError: - HAVE_SCIPY = False - - -__all__ = [ - 'exponential', - 'gaussian', - 'gaussian2D', - 'gamma', -] - - -def exponential(x, beta=1.0): - """ - Applies an exponential distribution on a Parameter. - - Parameters - ---------- - x : Parameter - Input Parameter. - beta : float, optional - Scale parameter. Default is 1.0. - - Returns - ------- - Parameter: - Object yielding values drawn from the distribution. - """ - return CreateParameter('exp_distribution', { - 'x': x, - 'beta': beta, - }) - - -def gaussian(x, mean=0.0, std=1.0): - """ - Applies a gaussian distribution on a Parameter. - - Parameters - ---------- - x : Parameter - Input Parameter. - mean : float, optional - Mean of the distribution. Default is 0.0. - std : float, optional - Standard deviation of the distribution. Default is 1.0. - - Returns - ------- - Parameter: - Object yielding values drawn from the distribution. - """ - return CreateParameter('gaussian', { - 'x': x, - 'mean': mean, - 'std': std, - }) - - -def gaussian2D(x, y, mean_x=0.0, mean_y=0.0, std_x=1.0, std_y=1.0, rho=0.0): - """ - Applies a bivariate gaussian distribution on two Parameters, representing values in the x and y direction. - - Parameters - ---------- - x : Parameter - Input Parameter for the x-direction. - y : Parameter - Input Parameter for the y-direction. - mean_x : float, optional - Mean of the distribution in the x-direction. Default is 0.0. - mean_y : float, optional - Mean of the distribution in the y-direction. Default is 0.0. - std_x : float, optional - Standard deviation of the distribution in the x-direction. Default is 1.0. - std_y : float, optional - Standard deviation of the distribution in the y-direction. Default is 1.0. - rho : float, optional - Correlation of x and y. Default is 0.0 - - Returns - ------- - Parameter: - Object yielding values drawn from the distribution. - """ - return CreateParameter('gaussian2d', { - 'x': x, - 'y': y, - 'mean_x': mean_x, - 'mean_y': mean_y, - 'std_x': std_x, - 'std_y': std_y, - 'rho': rho, - }) - - -def gamma(x, kappa=1.0, theta=1.0): - """ - Applies a gamma distribution on a Parameter. - - This function requires SciPy, and will raise an error if SciPy cannot be imported. - - Parameters - ---------- - x : Parameter - Input Parameter. - kappa : float, optional - Shape parameter. Default is 1.0. - theta : float, optional - Scale parameter. Default is 1.0. - - Returns - ------- - Parameter: - Object yielding values drawn from the distribution. - """ - return CreateParameter('gamma', { - 'x': x, - 'kappa': kappa, - 'theta': theta - }) +# -*- coding: utf-8 -*- +# +# _hl_api_spatial_distributions.py +# +# This file is part of NEST. +# +# Copyright (C) 2004 The NEST Initiative +# +# NEST is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 2 of the License, or +# (at your option) any later version. +# +# NEST is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with NEST. If not, see . + +from ..math import exp +from ..lib._hl_api_types import CreateParameter + +try: + import scipy.special + HAVE_SCIPY = True +except ImportError: + HAVE_SCIPY = False + + +__all__ = [ + 'exponential', + 'gaussian', + 'gaussian2D', + 'gamma', +] + + +def exponential(x, beta=1.0): + """ + Applies an exponential distribution on a Parameter. + + Parameters + ---------- + x : Parameter + Input Parameter. + beta : float, optional + Scale parameter. Default is 1.0. + + Returns + ------- + Parameter: + Object yielding values drawn from the distribution. + """ + return CreateParameter('exp_distribution', { + 'x': x, + 'beta': beta, + }) + + +def gaussian(x, mean=0.0, std=1.0): + """ + Applies a gaussian distribution on a Parameter. + + Parameters + ---------- + x : Parameter + Input Parameter. + mean : float, optional + Mean of the distribution. Default is 0.0. + std : float, optional + Standard deviation of the distribution. Default is 1.0. + + Returns + ------- + Parameter: + Object yielding values drawn from the distribution. + """ + return CreateParameter('gaussian', { + 'x': x, + 'mean': mean, + 'std': std, + }) + + +def gaussian2D(x, y, mean_x=0.0, mean_y=0.0, std_x=1.0, std_y=1.0, rho=0.0): + """ + Applies a bivariate gaussian distribution on two Parameters, representing values in the x and y direction. + + Parameters + ---------- + x : Parameter + Input Parameter for the x-direction. + y : Parameter + Input Parameter for the y-direction. + mean_x : float, optional + Mean of the distribution in the x-direction. Default is 0.0. + mean_y : float, optional + Mean of the distribution in the y-direction. Default is 0.0. + std_x : float, optional + Standard deviation of the distribution in the x-direction. Default is 1.0. + std_y : float, optional + Standard deviation of the distribution in the y-direction. Default is 1.0. + rho : float, optional + Correlation of x and y. Default is 0.0 + + Returns + ------- + Parameter: + Object yielding values drawn from the distribution. + """ + return CreateParameter('gaussian2d', { + 'x': x, + 'y': y, + 'mean_x': mean_x, + 'mean_y': mean_y, + 'std_x': std_x, + 'std_y': std_y, + 'rho': rho, + }) + + +def gamma(x, kappa=1.0, theta=1.0): + """ + Applies a gamma distribution on a Parameter. + + This function requires SciPy, and will raise an error if SciPy cannot be imported. + + Parameters + ---------- + x : Parameter + Input Parameter. + kappa : float, optional + Shape parameter. Default is 1.0. + theta : float, optional + Scale parameter. Default is 1.0. + + Returns + ------- + Parameter: + Object yielding values drawn from the distribution. 
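These distributions are typically evaluated on ``nest.spatial.distance`` to build distance-dependent connection profiles; a sketch (grid size and std illustrative)::

    import nest

    layer = nest.Create('iaf_psc_alpha', 100,
                        positions=nest.spatial.grid(shape=[10, 10]))
    p = nest.spatial_distributions.gaussian(nest.spatial.distance, std=0.25)
    nest.Connect(layer, layer, {'rule': 'pairwise_bernoulli', 'p': p})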
+ """ + return CreateParameter('gamma', { + 'x': x, + 'kappa': kappa, + 'theta': theta + }) diff --git a/pynest/nest/spatial_distributions/hl_api_spatial_distributions.py b/pynest/nest/spatial_distributions/hl_api_spatial_distributions2.py similarity index 100% rename from pynest/nest/spatial_distributions/hl_api_spatial_distributions.py rename to pynest/nest/spatial_distributions/hl_api_spatial_distributions2.py diff --git a/pynest/nestkernel_api.pyx b/pynest/nestkernel_api.pyx index 925783f445..f4cffb88f9 100644 --- a/pynest/nestkernel_api.pyx +++ b/pynest/nestkernel_api.pyx @@ -35,7 +35,7 @@ from libcpp.vector cimport vector import nest import numpy -from nest._lib.hl_api_exceptions import NESTErrors +from nest.lib.hl_api_exceptions import NESTErrors # cimport numpy diff --git a/pyproject.toml b/pyproject.toml index 87b3f3774e..0a785912a6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,7 +21,7 @@ BOOST_ROOT="/boost" GSL_ROOT_DIR="/gsl" [tool.cibuildwheel.linux] -before-build = "python3 extras/wheelbuild/prepare_container.py" +before-build = "python3 build_support/prepare_wheel_container.py" [tool.pytest.ini_options] markers = [ From ecc614fc4cc62397acbbcee6c47dd93f0167faee Mon Sep 17 00:00:00 2001 From: Robin De Schepper Date: Thu, 14 Sep 2023 22:25:19 +0200 Subject: [PATCH 10/17] install omp in container --- build_support/prepare_wheel_container.py | 1 + 1 file changed, 1 insertion(+) diff --git a/build_support/prepare_wheel_container.py b/build_support/prepare_wheel_container.py index b39e1b7178..ab8dba4048 100644 --- a/build_support/prepare_wheel_container.py +++ b/build_support/prepare_wheel_container.py @@ -11,6 +11,7 @@ def main(): + install_omp() # Containers run multiple builds, so check if a previous build has installed the # dependency already if not os.path.exists("/boost"): From a15ab8cef057d3772dd50d50611be0d5eeee18c8 Mon Sep 17 00:00:00 2001 From: Robin De Schepper Date: Thu, 14 Sep 2023 23:08:42 +0200 Subject: [PATCH 11/17] fix bad merge pynest imports --- examples/BrodyHopfield.py | 3 +- examples/brunel_alpha_nest.py | 3 +- examples/brunel_delta_nest.py | 3 +- examples/brunel_exp_multisynapse_nest.py | 3 +- examples/gif_population.py | 3 +- examples/hpc_benchmark.py | 2 +- examples/repeated_stimulation.py | 3 +- pynest/nest/lib/hl_api_spatial.py | 2 +- pynest/nest/ll_api.py | 197 +++++++ pynest/nest/{ => plot}/raster_plot.py | 0 pynest/nest/{ => plot}/visualization.py | 0 pynest/nest/server/__init__.py | 2 +- pynest/nest/server/_hl_api_server.py | 498 ------------------ .../{2hl_api_server.py => hl_api_server.py} | 0 pynest/nest/spatial/__init__.py | 4 +- pynest/nest/spatial/_hl_api_spatial.py | 264 ---------- .../{hl_api_spatial2.py => hl_api_spatial.py} | 0 pynest/nest/spatial_distributions/__init__.py | 2 +- .../_hl_api_spatial_distributions.py | 148 ------ ...ns2.py => hl_api_spatial_distributions.py} | 0 testsuite/pytests/test_visualization.py | 2 +- 21 files changed, 210 insertions(+), 929 deletions(-) create mode 100644 pynest/nest/ll_api.py rename pynest/nest/{ => plot}/raster_plot.py (100%) rename pynest/nest/{ => plot}/visualization.py (100%) delete mode 100644 pynest/nest/server/_hl_api_server.py rename pynest/nest/server/{2hl_api_server.py => hl_api_server.py} (100%) delete mode 100644 pynest/nest/spatial/_hl_api_spatial.py rename pynest/nest/spatial/{hl_api_spatial2.py => hl_api_spatial.py} (100%) delete mode 100644 pynest/nest/spatial_distributions/_hl_api_spatial_distributions.py rename 
pynest/nest/spatial_distributions/{hl_api_spatial_distributions2.py => hl_api_spatial_distributions.py} (100%) diff --git a/examples/BrodyHopfield.py b/examples/BrodyHopfield.py index 66c1357a91..5b5453225a 100755 --- a/examples/BrodyHopfield.py +++ b/examples/BrodyHopfield.py @@ -46,8 +46,7 @@ # First, we import all necessary modules for simulation, analysis, and plotting. import matplotlib.pyplot as plt -import nest -import nest.raster_plot +import nest.plot.raster_plot ############################################################################### # Second, the simulation parameters are assigned to variables. diff --git a/examples/brunel_alpha_nest.py b/examples/brunel_alpha_nest.py index cae0a6ca1e..2cac1d0b17 100755 --- a/examples/brunel_alpha_nest.py +++ b/examples/brunel_alpha_nest.py @@ -51,8 +51,7 @@ import time import matplotlib.pyplot as plt -import nest -import nest.raster_plot +import nest.plot.raster_plot import numpy as np import scipy.special as sp diff --git a/examples/brunel_delta_nest.py b/examples/brunel_delta_nest.py index 6d8d27bd69..694e38f688 100755 --- a/examples/brunel_delta_nest.py +++ b/examples/brunel_delta_nest.py @@ -47,8 +47,7 @@ import time import matplotlib.pyplot as plt -import nest -import nest.raster_plot +import nest.plot.raster_plot nest.ResetKernel() diff --git a/examples/brunel_exp_multisynapse_nest.py b/examples/brunel_exp_multisynapse_nest.py index bcd5eec8ca..31727211b4 100644 --- a/examples/brunel_exp_multisynapse_nest.py +++ b/examples/brunel_exp_multisynapse_nest.py @@ -59,8 +59,7 @@ import time import matplotlib.pyplot as plt -import nest -import nest.raster_plot +import nest.plot.raster_plot nest.ResetKernel() diff --git a/examples/gif_population.py b/examples/gif_population.py index 9f73dbccdb..9a0980f5ab 100644 --- a/examples/gif_population.py +++ b/examples/gif_population.py @@ -51,8 +51,7 @@ # Import all necessary modules for simulation and plotting. import matplotlib.pyplot as plt -import nest -import nest.raster_plot +import nest.plot.raster_plot nest.ResetKernel() diff --git a/examples/hpc_benchmark.py b/examples/hpc_benchmark.py index 32acf595a3..f7ce6e1cad 100644 --- a/examples/hpc_benchmark.py +++ b/examples/hpc_benchmark.py @@ -95,7 +95,7 @@ import time import nest -import nest.raster_plot +import nest.plot.raster_plot import numpy as np import scipy.special as sp diff --git a/examples/repeated_stimulation.py b/examples/repeated_stimulation.py index cd2fe44513..977ff39124 100644 --- a/examples/repeated_stimulation.py +++ b/examples/repeated_stimulation.py @@ -45,8 +45,7 @@ import matplotlib.pyplot as plt -import nest -import nest.raster_plot +import nest.plot.raster_plot ############################################################################### # Second, we set the parameters so the ``poisson_generator`` generates 1000 diff --git a/pynest/nest/lib/hl_api_spatial.py b/pynest/nest/lib/hl_api_spatial.py index 6d474efb7c..815efbe99f 100644 --- a/pynest/nest/lib/hl_api_spatial.py +++ b/pynest/nest/lib/hl_api_spatial.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# _hl_api_spatial.py +# hl_api_spatial.py # # This file is part of NEST. # diff --git a/pynest/nest/ll_api.py b/pynest/nest/ll_api.py new file mode 100644 index 0000000000..6418838087 --- /dev/null +++ b/pynest/nest/ll_api.py @@ -0,0 +1,197 @@ +# -*- coding: utf-8 -*- +# +# ll_api.py +# +# This file is part of NEST. 
+# +# Copyright (C) 2004 The NEST Initiative +# +# NEST is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 2 of the License, or +# (at your option) any later version. +# +# NEST is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with NEST. If not, see . + +""" +Low-level API of PyNEST Module +""" + +# Since this is a low level module, we need some more trickery, thus: +# pylint: disable=wrong-import-position + +import functools +import inspect +import keyword +import os +import sys + +# This is a workaround for readline import errors encountered with Anaconda +# Python running on Ubuntu, when invoked from the terminal +# "python -c 'import nest'" +if "linux" in sys.platform and "Anaconda" in sys.version: + import readline # noqa: F401 + +# This is a workaround to avoid segmentation faults when importing +# scipy *after* nest. See https://github.com/numpy/numpy/issues/2521 +try: + import scipy # noqa: F401 +except ImportError: + pass + +# Make MPI-enabled NEST import properly. The underlying problem is that the +# shared object pynestkernel dynamically opens other libraries that open +# yet other libraries. +sys.setdlopenflags(os.RTLD_NOW | os.RTLD_GLOBAL) + +from . import nestkernel_api as nestkernel # noqa +from .lib.hl_api_exceptions import NESTError, NESTErrors + +__all__ = [ + "set_communicator", + # 'take_array_index', + "KernelAttribute", +] + + +initialized = False + + +def set_communicator(comm): + """Set global communicator for NEST. + + Parameters + ---------- + comm: MPI.Comm from mpi4py + + Raises + ------ + nestkernel.NESTError + """ + + if "mpi4py" not in sys.modules: + raise NESTError("set_communicator: " "mpi4py not loaded.") + + # TODO-PYNEST-NG: set_communicator + # engine.set_communicator(comm) + + +class KernelAttribute: + """ + Descriptor that dispatches attribute access to the nest kernel. + """ + + def __init__(self, typehint, description, readonly=False, default=None, localonly=False): + self._readonly = readonly + self._localonly = localonly + self._default = default + + readonly = readonly and "**read only**" + localonly = localonly and "**local only**" + + self.__doc__ = ( + description + + ("." if default is None else f", defaults to ``{default}``.") + + ("\n\n" if readonly or localonly else "") + + ", ".join(c for c in (readonly, localonly) if c) + + f"\n\n:type: {typehint}" + ) + + def __set_name__(self, cls, name): + self._name = name + self._full_status = name == "kernel_status" + + def __get__(self, instance, cls=None): + if instance is None: + return self + + status_root = nestkernel.llapi_get_kernel_status() + + if self._full_status: + return status_root + else: + return status_root[self._name] + + def __set__(self, instance, value): + if self._readonly: + msg = f"`{self._name}` is a read only kernel attribute." + raise AttributeError(msg) + nestkernel.llapi_set_kernel_status({self._name: value}) + + +def init(argv): + """Initializes NEST. + + If the environment variable PYNEST_QUIET is set, NEST will not print + welcome text containing the version and other information. Likewise, + if the environment variable PYNEST_DEBUG is set, NEST starts in debug + mode. 
Note that the same effect can be achieved by using the + commandline arguments --quiet and --debug respectively. + + Parameters + ---------- + argv : list + Command line arguments, passed to the NEST kernel + + Raises + ------ + nestkernel.NESTError.PyNESTError + """ + + global initialized + + if initialized: + raise NESTErrors.PyNESTError("NEST already initialized.") + + # Some commandline arguments of NEST and Python have the same + # name, but different meaning. To avoid unintended behavior, we + # handle NEST's arguments here and pass it a modified copy, while + # we leave the original list unchanged for further use by the user + # or other modules. + nest_argv = argv[:] + + quiet = "--quiet" in nest_argv or "PYNEST_QUIET" in os.environ + if "--quiet" in nest_argv: + nest_argv.remove("--quiet") + if "--debug" in nest_argv: + nest_argv.remove("--debug") + if "--sli-debug" in nest_argv: + nest_argv.remove("--sli-debug") + nest_argv.append("--debug") + + if "PYNEST_DEBUG" in os.environ and "--debug" not in nest_argv: + nest_argv.append("--debug") + + path = os.path.dirname(__file__) + nestkernel.init(nest_argv) + + if not quiet: + print("NEST initialized successfully!") + + # Dirty hack to get tab-completion for models in IPython. + try: + __IPYTHON__ + except NameError: + pass + else: + from .lib.hl_api_simulation import GetKernelStatus # noqa + + keyword_lists = ( + "connection_rules", + "node_models", + "recording_backends", + "rng_types", + "stimulation_backends", + "synapse_models", + ) + for kwl in keyword_lists: + keyword.kwlist += GetKernelStatus(kwl) + + +init(sys.argv) diff --git a/pynest/nest/raster_plot.py b/pynest/nest/plot/raster_plot.py similarity index 100% rename from pynest/nest/raster_plot.py rename to pynest/nest/plot/raster_plot.py diff --git a/pynest/nest/visualization.py b/pynest/nest/plot/visualization.py similarity index 100% rename from pynest/nest/visualization.py rename to pynest/nest/plot/visualization.py diff --git a/pynest/nest/server/__init__.py b/pynest/nest/server/__init__.py index 597a47da7a..0ea1b54eda 100644 --- a/pynest/nest/server/__init__.py +++ b/pynest/nest/server/__init__.py @@ -19,4 +19,4 @@ # You should have received a copy of the GNU General Public License # along with NEST. If not, see . -from ._hl_api_server import * # noqa: F401,F403 +from .hl_api_server import * # noqa: F401,F403 diff --git a/pynest/nest/server/_hl_api_server.py b/pynest/nest/server/_hl_api_server.py deleted file mode 100644 index cbb8e11d4f..0000000000 --- a/pynest/nest/server/_hl_api_server.py +++ /dev/null @@ -1,498 +0,0 @@ -# -*- coding: utf-8 -*- -# -# _hl_api_server.py -# -# This file is part of NEST. -# -# Copyright (C) 2004 The NEST Initiative -# -# NEST is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 2 of the License, or -# (at your option) any later version. -# -# NEST is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with NEST. If not, see . 
- -import importlib -import inspect -import io -import sys - -import flask -from flask import Flask, request, jsonify -from flask_cors import CORS, cross_origin - -from werkzeug.exceptions import abort -from werkzeug.wrappers import Response - -import nest - -import RestrictedPython -import time - -import traceback - -from copy import deepcopy - -import os - -MODULES = os.environ.get('NEST_SERVER_MODULES', 'nest').split(',') -RESTRICTION_OFF = bool(os.environ.get('NEST_SERVER_RESTRICTION_OFF', False)) -EXCEPTION_ERROR_STATUS = 400 - -if RESTRICTION_OFF: - msg = 'NEST Server runs without a RestrictedPython trusted environment.' - print(f'***\n*** WARNING: {msg}\n***') - - -__all__ = [ - 'app', - 'do_exec', - 'set_mpi_comm', - 'run_mpi_app', - 'nestify', -] - -app = Flask(__name__) -CORS(app) - -mpi_comm = None - - -@app.route('/', methods=['GET']) -def index(): - return jsonify({ - 'nest': nest.__version__, - 'mpi': mpi_comm is not None, - }) - - -def do_exec(args, kwargs): - try: - source_code = kwargs.get('source', '') - source_cleaned = clean_code(source_code) - - locals_ = dict() - response = dict() - if RESTRICTION_OFF: - with Capturing() as stdout: - exec(source_cleaned, get_globals(), locals_) - if len(stdout) > 0: - response['stdout'] = '\n'.join(stdout) - else: - code = RestrictedPython.compile_restricted(source_cleaned, '', 'exec') # noqa - exec(code, get_restricted_globals(), locals_) - if '_print' in locals_: - response['stdout'] = ''.join(locals_['_print'].txt) - - if 'return' in kwargs: - if isinstance(kwargs['return'], list): - data = dict() - for variable in kwargs['return']: - data[variable] = locals_.get(variable, None) - else: - data = locals_.get(kwargs['return'], None) - response['data'] = nest.serializable(data) - return response - - except Exception as e: - for line in traceback.format_exception(*sys.exc_info()): - print(line, flush=True) - abort(Response(str(e), EXCEPTION_ERROR_STATUS)) - - -def log(call_name, msg): - msg = f'==> MASTER 0/{time.time():.7f} ({call_name}): {msg}' - print(msg, flush=True) - - -def do_call(call_name, args=[], kwargs={}): - """Call a PYNEST function or execute a script within the server. - - If the server is run serially (i.e., without MPI), this function - will do one of two things: If call_name is "exec", it will execute - the script given in args via do_exec(). If call_name is the name - of a PyNEST API function, it will call that function and pass args - and kwargs to it. - - If the server is run with MPI, this function will first communicate - the call type ("exec" or API call) and the args and kwargs to all - worker processes. Only then will it execute the call in the same - way as described above for the serial case. After the call, all - worker responses are collected, combined and returned. - - Please note that this function must only be called on the master - process (i.e., the task with rank 0) in a distributed scenario. 
- - """ - - if mpi_comm is not None: - assert mpi_comm.Get_rank() == 0 - - if mpi_comm is not None: - log(call_name, 'sending call bcast') - mpi_comm.bcast(call_name, root=0) - data = (args, kwargs) - log(call_name, f'sending data bcast, data={data}') - mpi_comm.bcast(data, root=0) - - if call_name == "exec": - master_response = do_exec(args, kwargs) - else: - call, args, kwargs = nestify(call_name, args, kwargs) - log(call_name, f'local call, args={args}, kwargs={kwargs}') - master_response = call(*args, **kwargs) - - response = [nest.serializable(master_response)] - if mpi_comm is not None: - log(call_name, 'waiting for response gather') - response = mpi_comm.gather(response[0], root=0) - log(call_name, f'received response gather, data={response}') - - return combine(call_name, response) - - -@app.route('/exec', methods=['GET', 'POST']) -@cross_origin() -def route_exec(): - """ Route to execute script in Python. - """ - - args, kwargs = get_arguments(request) - response = do_call('exec', args, kwargs) - return jsonify(response) - - -# -------------------------- -# RESTful API -# -------------------------- - -nest_calls = dir(nest) -nest_calls = list(filter(lambda x: not x.startswith('_'), nest_calls)) -nest_calls.sort() - - -@app.route('/api', methods=['GET']) -@cross_origin() -def route_api(): - """ Route to list call functions in NEST. - """ - return jsonify(nest_calls) - - -@app.route('/api/', methods=['GET', 'POST']) -@cross_origin() -def route_api_call(call): - """ Route to call function in NEST. - """ - print(f"\n{'='*40}\n", flush=True) - args, kwargs = get_arguments(request) - log("route_api_call", f"call={call}, args={args}, kwargs={kwargs}") - response = api_client(call, args, kwargs) - return jsonify(response) - - -# ---------------------- -# Helpers for the server -# ---------------------- - -class Capturing(list): - """ Monitor stdout contents i.e. print. - """ - def __enter__(self): - self._stdout = sys.stdout - sys.stdout = self._stringio = io.StringIO() - return self - - def __exit__(self, *args): - self.extend(self._stringio.getvalue().splitlines()) - del self._stringio # free up some memory - sys.stdout = self._stdout - - -def clean_code(source): - codes = source.split('\n') - code_cleaned = filter(lambda code: not (code.startswith('import') or code.startswith('from')), codes) # noqa - return '\n'.join(code_cleaned) - - -def get_arguments(request): - """ Get arguments from the request. - """ - args, kwargs = [], {} - if request.is_json: - json = request.get_json() - if isinstance(json, str) and len(json) > 0: - args = [json] - elif isinstance(json, list): - args = json - elif isinstance(json, dict): - kwargs = json - if 'args' in kwargs: - args = kwargs.pop('args') - elif len(request.form) > 0: - if 'args' in request.form: - args = request.form.getlist('args') - else: - kwargs = request.form.to_dict() - elif len(request.args) > 0: - if 'args' in request.args: - args = request.args.getlist('args') - else: - kwargs = request.args.to_dict() - return list(args), kwargs - - -def get_globals(): - """ Get globals for exec function. - """ - copied_globals = globals().copy() - - # Add modules to copied globals - modlist = [(module, importlib.import_module(module)) for module in MODULES] - modules = dict(modlist) - copied_globals.update(modules) - - return copied_globals - - -def get_or_error(func): - """ Wrapper to get data and status. 
- """ - def func_wrapper(call, args, kwargs): - try: - return func(call, args, kwargs) - except Exception as e: - for line in traceback.format_exception(*sys.exc_info()): - print(line, flush=True) - abort(Response(str(e), EXCEPTION_ERROR_STATUS)) - return func_wrapper - - -def get_restricted_globals(): - """ Get restricted globals for exec function. - """ - def getitem(obj, index): - typelist = (list, tuple, dict, nest.NodeCollection) - if obj is not None and type(obj) in typelist: - return obj[index] - msg = f"Error getting restricted globals: unidentified object '{obj}'." - raise TypeError(msg) - - restricted_builtins = RestrictedPython.safe_builtins.copy() - restricted_builtins.update(RestrictedPython.limited_builtins) - restricted_builtins.update(RestrictedPython.utility_builtins) - restricted_builtins.update(dict( - max=max, - min=min, - sum=sum, - time=time, - )) - - restricted_globals = dict( - __builtins__=restricted_builtins, - _print_=RestrictedPython.PrintCollector, - _getattr_=RestrictedPython.Guards.safer_getattr, - _getitem_=getitem, - _getiter_=iter, - _unpack_sequence_=RestrictedPython.Guards.guarded_unpack_sequence, - _write_=RestrictedPython.Guards.full_write_guard, - ) - - # Add modules to restricted globals - modlist = [(module, importlib.import_module(module)) for module in MODULES] - modules = dict(modlist) - restricted_globals.update(modules) - - return restricted_globals - - -def nestify(call_name, args, kwargs): - """Get the NEST API call and convert arguments if neccessary. - """ - - call = getattr(nest, call_name) - objectnames = ['nodes', 'source', 'target', 'pre', 'post'] - paramKeys = list(inspect.signature(call).parameters.keys()) - args = [nest.NodeCollection(arg) if paramKeys[idx] in objectnames - else arg for (idx, arg) in enumerate(args)] - for (key, value) in kwargs.items(): - if key in objectnames: - kwargs[key] = nest.NodeCollection(value) - - return call, args, kwargs - - -@get_or_error -def api_client(call_name, args, kwargs): - """ API Client to call function in NEST. - """ - - call = getattr(nest, call_name) - - if callable(call): - if 'inspect' in kwargs: - response = { - 'data': getattr(inspect, kwargs['inspect'])(call) - } - else: - response = do_call(call_name, args, kwargs) - else: - response = call - - return response - - -def set_mpi_comm(comm): - global mpi_comm - mpi_comm = comm - - -def run_mpi_app(host="127.0.0.1", port=52425): - # NEST crashes with a segmentation fault if the number of threads - # is changed from the outside. Calling run() with threaded=False - # prevents Flask from performing such changes. - app.run(host=host, port=port, threaded=False) - - -def combine(call_name, response): - """Combine responses from different MPI processes. - - In a distributed scenario, each MPI process creates its own share - of the response from the data available locally. To present a - coherent view on the reponse data for the caller, this data has to - be combined. - - If this function is run serially (i.e., without MPI), it just - returns the response data from the only process immediately. - - The type of the returned result can vary depending on the call - that produced it. - - The combination of results is based on a cascade of heuristics - based on the call that was issued and individual repsonse data: - * if all responses are None, the combined response will also just - be None - * for some specific calls, the responses are known to be the - same from the master and all workers. 
In this case, the - combined response is just the master response - * if the response list contains only a single actual response and - None otherwise, the combined response will be that one actual - response - * for calls to GetStatus on recording devices, the combined - response will be a merged dictionary in the sense that all - fields that contain a single value in the individual responsed - are kept as a single values, while lists will be appended in - order of appearance; dictionaries in the response are - recursively treated in the same way - * for calls to GetStatus on neurons, the combined response is just - the single dictionary returned by the process on which the - neuron is actually allocated - * if the response contains one list per process, the combined - response will be those lists concatenated and flattened. - - """ - - if mpi_comm is None: - return response[0] - - if all(v is None for v in response): - return None - - # return the master response if all responses are known to be the same - if call_name in ('exec', 'Create', 'GetDefaults', 'GetKernelStatus', - 'SetKernelStatus', 'SetStatus'): - return response[0] - - # return a single response if there is only one which is not None - filtered_response = list(filter(lambda x: x is not None, response)) - if len(filtered_response) == 1: - return filtered_response[0] - - # return a single merged dictionary if there are many of them - if all(type(v[0]) is dict for v in response): - return merge_dicts(response) - - # return a flattened list if the response only consists of lists - if all(type(v) is list for v in response): - return [item for lst in response for item in lst] - - log("combine()", f"ERROR: cannot combine response={response}") - msg = "Cannot combine data because of unknown reason" - raise Exception(msg) - - -def merge_dicts(response): - """Merge status dictionaries of recorders - - This function runs through a zipped list and performs the - following steps: - * sum up all n_events fields - * if recording to memory: merge the event dictionaries by joining - all contained arrays - * if recording to ascii: join filenames arrays - * take all other values directly from the device on the first - process - - """ - - result = [] - - for device_dicts in zip(*response): - - # TODO: either stip fields like thread, vp, thread_local_id, - # and local or make them lists that contain the values from - # all dicts. - - element_type = device_dicts[0]['element_type'] - - if element_type not in ('neuron', 'recorder', 'stimulator'): - msg = f'Cannot combine data of element with type "{element_type}".' - raise Exception(msg) - - if element_type == 'neuron': - tmp = list(filter(lambda status: status['local'], device_dicts)) - assert len(tmp) == 1 - result.append(tmp[0]) - - if element_type == 'recorder': - tmp = deepcopy(device_dicts[0]) - tmp['n_events'] = 0 - - for device_dict in device_dicts: - tmp['n_events'] += device_dict['n_events'] - - record_to = tmp['record_to'] - if record_to not in ('ascii', 'memory'): - msg = f'Cannot combine data when recording to "{record_to}".' 
- raise Exception(msg) - - if record_to == 'memory': - event_keys = tmp['events'].keys() - for key in event_keys: - tmp['events'][key] = [] - for device_dict in device_dicts: - for key in event_keys: - tmp['events'][key].extend(device_dict['events'][key]) - - if record_to == 'ascii': - tmp['filenames'] = [] - for device_dict in device_dicts: - tmp['filenames'].extend(device_dict['filenames']) - - result.append(tmp) - - if element_type == 'stimulator': - result.append(device_dicts[0]) - - return result - - -if __name__ == "__main__": - app.run() diff --git a/pynest/nest/server/2hl_api_server.py b/pynest/nest/server/hl_api_server.py similarity index 100% rename from pynest/nest/server/2hl_api_server.py rename to pynest/nest/server/hl_api_server.py diff --git a/pynest/nest/spatial/__init__.py b/pynest/nest/spatial/__init__.py index c2227b7373..94e1fa6e5f 100644 --- a/pynest/nest/spatial/__init__.py +++ b/pynest/nest/spatial/__init__.py @@ -21,8 +21,8 @@ import functools as _functools -from ._hl_api_spatial import DistanceParameter as _DistanceParameter -from ._hl_api_spatial import * # noqa: F401,F403 +from .hl_api_spatial import DistanceParameter as _DistanceParameter +from .hl_api_spatial import * # noqa: F401,F403 @_functools.lru_cache(maxsize=None) diff --git a/pynest/nest/spatial/_hl_api_spatial.py b/pynest/nest/spatial/_hl_api_spatial.py deleted file mode 100644 index 395bfd49c9..0000000000 --- a/pynest/nest/spatial/_hl_api_spatial.py +++ /dev/null @@ -1,264 +0,0 @@ -# -*- coding: utf-8 -*- -# -# _hl_api_spatial.py -# -# This file is part of NEST. -# -# Copyright (C) 2004 The NEST Initiative -# -# NEST is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 2 of the License, or -# (at your option) any later version. -# -# NEST is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with NEST. If not, see . - -import numpy as np -from ..lib._hl_api_types import CreateParameter, Parameter -from .. import nestkernel_api as nestkernel - - -__all__ = [ - 'distance', - 'grid', - 'free', - 'pos', - 'source_pos', - 'target_pos', -] - - -class DistanceParameter(Parameter): - """ - Object representing the distance between two nodes in space. - - If used alone, the DistanceObject represents simply the Euclidean - distance between two nodes. - - Alternatively the distance in a single dimension may be chosen. Three - properties are defined, x, y, and z, which represent the distance in - their respective dimensions. Note that the distance parameter can only - be used in contexts with two nodes, e.g. when connecting. 
- """ - - def __init__(self): - distance_parameter = CreateParameter('distance', {}) - super().__init__(distance_parameter._datum) - - @property - def x(self): - """Parameter representing the distance on the x-axis""" - return CreateParameter('distance', {'dimension': 1}) - - @property - def y(self): - """Parameter representing the distance on the y-axis""" - return CreateParameter('distance', {'dimension': 2}) - - @property - def z(self): - """Parameter representing the distance on the z-axis""" - return CreateParameter('distance', {'dimension': 3}) - - @staticmethod - def n(dimension): - """ - Distance in given dimension. - - Parameters - ---------- - dimension : int - Dimension in which to get the distance. - - Returns - ------- - Parameter: - Object yielding the distance in the given dimension. - """ - return CreateParameter('distance', {'dimension': dimension}) - - -distance = DistanceParameter() - - -class pos: - """ - Position of node in a specific dimension. - - Three properties are defined, x, y, and z, which represent the - position in their respective dimensions. Note that this parameter can - only be used in contexts with one node, e.g. when setting node status. - """ - x = CreateParameter('position', {'dimension': 0}) - y = CreateParameter('position', {'dimension': 1}) - z = CreateParameter('position', {'dimension': 2}) - - @staticmethod - def n(dimension): - """ - Position in given dimension. - - Parameters - ---------- - dimension : int - Dimension in which to get the position. - - Returns - ------- - Parameter: - Object yielding the position in the given dimension. - """ - return CreateParameter('position', {'dimension': dimension}) - - -class source_pos: - """ - Position of the source node in a specific dimension. - - Three properties are defined, x, y, and z, which represent the source - node position in their respective dimensions. Note that this parameter - can only be used in contexts with two nodes, e.g. when connecting. - """ - x = CreateParameter('position', {'dimension': 0, 'synaptic_endpoint': 1}) - y = CreateParameter('position', {'dimension': 1, 'synaptic_endpoint': 1}) - z = CreateParameter('position', {'dimension': 2, 'synaptic_endpoint': 1}) - - @staticmethod - def n(dimension): - """ - Position of source node in given dimension. - - Parameters - ---------- - dimension : int - Dimension in which to get the position. - - Returns - ------- - Parameter: - Object yielding the position in the given dimension. - """ - return CreateParameter('position', - {'dimension': dimension, 'synaptic_endpoint': 1}) - - -class target_pos: - """ - Position of the target node in a specific dimension. - - Three properties are defined, x, y, and z, which represent the target - node position in their respective dimensions. Note that this parameter - can only be used in contexts with two nodes, e.g. when connecting. - """ - x = CreateParameter('position', {'dimension': 0, 'synaptic_endpoint': 2}) - y = CreateParameter('position', {'dimension': 1, 'synaptic_endpoint': 2}) - z = CreateParameter('position', {'dimension': 2, 'synaptic_endpoint': 2}) - - @staticmethod - def n(dimension): - """ - Position of target node in given dimension. - - Parameters - ---------- - dimension : int - Dimension in which to get the position. - - Returns - ------- - Parameter: - Object yielding the position in the given dimension. - """ - return CreateParameter('position', - {'dimension': dimension, 'synaptic_endpoint': 2}) - - -class grid: - """ - Defines grid-based positions for nodes. 
- - Parameters - ---------- - shape : list - Two- or three-element list with the grid shape in two or three dimensions, respectively. - center : list, optional - Position of the center of the layer. - extent : list, optional - Extent of the layer in each dimension. - edge_wrap : bool, optional - Specifies periodic boundary conditions. - """ - - def __init__(self, shape, center=None, extent=None, edge_wrap=False): - self.shape = shape - self.center = center - self.extent = extent - self.edge_wrap = edge_wrap - - -class free: - """ - Defines positions for nodes based on a list of positions, or a Parameter object. - - Parameters - ---------- - pos : [list | Parameter] - Either a list of two- or three-element lists containing positions, depending on number of dimensions, - a two- or three-element list of Parameters, depending on number of dimensions, - or a single Parameter. - extent : list, optional - Extent of the layer in each dimension. - edge_wrap : bool, optional - Specifies periodic boundary conditions. - num_dimensions : int, optional - If a single Parameter is given as position, and no extent is - specified, the number of dimensions must be set explicitly. - """ - - def __init__(self, pos, extent=None, edge_wrap=False, num_dimensions=None): - if extent and num_dimensions: - raise TypeError( - 'extent and number of dimensions cannot be specified at the' - ' same time') - if isinstance(pos, (list, tuple, np.ndarray)): - if num_dimensions: - raise TypeError( - 'number of dimensions cannot be specified when using an' - ' array of positions') - if len(pos) == sum(isinstance(d, Parameter) for d in pos): - self.pos = self._parameter_list_to_dimension(pos, len(pos)) - else: - self.pos = pos - elif isinstance(pos, Parameter): - if extent: - num_dimensions = len(extent) - # Number of dimensions is unknown if it cannot be inferred from - # extent, or if it's not explicitly specified. - if not num_dimensions: - raise TypeError( - 'could not infer number of dimensions. Set ' - 'num_dimensions or extent when using Parameter as pos') - dim_parameters = [pos for _ in range(num_dimensions)] - self.pos = self._parameter_list_to_dimension(dim_parameters, num_dimensions) - else: - raise TypeError( - 'pos must be either an array of positions, or a Parameter') - - self.extent = extent - self.edge_wrap = edge_wrap - - def _parameter_list_to_dimension(self, dim_parameters, num_dimensions): - """Converts a list of Parameters to a dimension2d or dimension3d Parameter.""" - assert(len(dim_parameters) == num_dimensions) - if num_dimensions < 2 or num_dimensions > 3: - raise ValueError('Number of dimensions must be 2 or 3') - # The dimension2d and dimension3d Parameter stores a Parameter for - # each dimension. When creating positions for nodes, values from - # each parameter are fetched for the position vector. - return nestkernel.llapi_dimension_parameter([p._datum for p in dim_parameters]) diff --git a/pynest/nest/spatial/hl_api_spatial2.py b/pynest/nest/spatial/hl_api_spatial.py similarity index 100% rename from pynest/nest/spatial/hl_api_spatial2.py rename to pynest/nest/spatial/hl_api_spatial.py diff --git a/pynest/nest/spatial_distributions/__init__.py b/pynest/nest/spatial_distributions/__init__.py index decb5a3c46..d667d86ff2 100644 --- a/pynest/nest/spatial_distributions/__init__.py +++ b/pynest/nest/spatial_distributions/__init__.py @@ -19,4 +19,4 @@ # You should have received a copy of the GNU General Public License # along with NEST. If not, see . 
-from ._hl_api_spatial_distributions import * # noqa: F401,F403 +from .hl_api_spatial_distributions import * # noqa: F401,F403 diff --git a/pynest/nest/spatial_distributions/_hl_api_spatial_distributions.py b/pynest/nest/spatial_distributions/_hl_api_spatial_distributions.py deleted file mode 100644 index 9ed64268ee..0000000000 --- a/pynest/nest/spatial_distributions/_hl_api_spatial_distributions.py +++ /dev/null @@ -1,148 +0,0 @@ -# -*- coding: utf-8 -*- -# -# _hl_api_spatial_distributions.py -# -# This file is part of NEST. -# -# Copyright (C) 2004 The NEST Initiative -# -# NEST is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 2 of the License, or -# (at your option) any later version. -# -# NEST is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with NEST. If not, see . - -from ..math import exp -from ..lib._hl_api_types import CreateParameter - -try: - import scipy.special - HAVE_SCIPY = True -except ImportError: - HAVE_SCIPY = False - - -__all__ = [ - 'exponential', - 'gaussian', - 'gaussian2D', - 'gamma', -] - - -def exponential(x, beta=1.0): - """ - Applies an exponential distribution on a Parameter. - - Parameters - ---------- - x : Parameter - Input Parameter. - beta : float, optional - Scale parameter. Default is 1.0. - - Returns - ------- - Parameter: - Object yielding values drawn from the distribution. - """ - return CreateParameter('exp_distribution', { - 'x': x, - 'beta': beta, - }) - - -def gaussian(x, mean=0.0, std=1.0): - """ - Applies a gaussian distribution on a Parameter. - - Parameters - ---------- - x : Parameter - Input Parameter. - mean : float, optional - Mean of the distribution. Default is 0.0. - std : float, optional - Standard deviation of the distribution. Default is 1.0. - - Returns - ------- - Parameter: - Object yielding values drawn from the distribution. - """ - return CreateParameter('gaussian', { - 'x': x, - 'mean': mean, - 'std': std, - }) - - -def gaussian2D(x, y, mean_x=0.0, mean_y=0.0, std_x=1.0, std_y=1.0, rho=0.0): - """ - Applies a bivariate gaussian distribution on two Parameters, representing values in the x and y direction. - - Parameters - ---------- - x : Parameter - Input Parameter for the x-direction. - y : Parameter - Input Parameter for the y-direction. - mean_x : float, optional - Mean of the distribution in the x-direction. Default is 0.0. - mean_y : float, optional - Mean of the distribution in the y-direction. Default is 0.0. - std_x : float, optional - Standard deviation of the distribution in the x-direction. Default is 1.0. - std_y : float, optional - Standard deviation of the distribution in the y-direction. Default is 1.0. - rho : float, optional - Correlation of x and y. Default is 0.0 - - Returns - ------- - Parameter: - Object yielding values drawn from the distribution. - """ - return CreateParameter('gaussian2d', { - 'x': x, - 'y': y, - 'mean_x': mean_x, - 'mean_y': mean_y, - 'std_x': std_x, - 'std_y': std_y, - 'rho': rho, - }) - - -def gamma(x, kappa=1.0, theta=1.0): - """ - Applies a gamma distribution on a Parameter. - - This function requires SciPy, and will raise an error if SciPy cannot be imported. 
- - Parameters - ---------- - x : Parameter - Input Parameter. - kappa : float, optional - Shape parameter. Default is 1.0. - theta : float, optional - Scale parameter. Default is 1.0. - - Returns - ------- - Parameter: - Object yielding values drawn from the distribution. - """ - return CreateParameter('gamma', { - 'x': x, - 'kappa': kappa, - 'theta': theta - }) diff --git a/pynest/nest/spatial_distributions/hl_api_spatial_distributions2.py b/pynest/nest/spatial_distributions/hl_api_spatial_distributions.py similarity index 100% rename from pynest/nest/spatial_distributions/hl_api_spatial_distributions2.py rename to pynest/nest/spatial_distributions/hl_api_spatial_distributions.py diff --git a/testsuite/pytests/test_visualization.py b/testsuite/pytests/test_visualization.py index 5cd596fcbf..2affa297dd 100644 --- a/testsuite/pytests/test_visualization.py +++ b/testsuite/pytests/test_visualization.py @@ -73,7 +73,7 @@ def setUp(self): @pytest.mark.skipif(not HAVE_PYDOT, reason="pydot not found") def test_plot_network(self): """Test plot_network""" - import nest.visualization as nvis + import nest.plot.visualization as nvis nest.ResetKernel() sources = nest.Create("iaf_psc_alpha", 10) From 6950394a25cc6aaca9c63d840a8b1bf0a9ebb047 Mon Sep 17 00:00:00 2001 From: Robin De Schepper Date: Thu, 14 Sep 2023 23:31:49 +0200 Subject: [PATCH 12/17] fix more imports --- pynest/nest/logic/__init__.py | 2 +- pynest/nest/math/__init__.py | 2 +- pynest/nest/plot/__init__.py | 7 +++---- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/pynest/nest/logic/__init__.py b/pynest/nest/logic/__init__.py index e39a4df3fe..63f9064edf 100644 --- a/pynest/nest/logic/__init__.py +++ b/pynest/nest/logic/__init__.py @@ -19,4 +19,4 @@ # You should have received a copy of the GNU General Public License # along with NEST. If not, see . -from ._hl_api_logic import * # noqa: F401,F403 +from .hl_api_logic import * # noqa: F401,F403 diff --git a/pynest/nest/math/__init__.py b/pynest/nest/math/__init__.py index aa5e4bd220..b5c8b731c7 100644 --- a/pynest/nest/math/__init__.py +++ b/pynest/nest/math/__init__.py @@ -19,4 +19,4 @@ # You should have received a copy of the GNU General Public License # along with NEST. If not, see . -from ._hl_api_math import * # noqa: F401,F403 +from .hl_api_math import * # noqa: F401,F403 diff --git a/pynest/nest/plot/__init__.py b/pynest/nest/plot/__init__.py index 9ac49d3f85..ea692cf2ef 100644 --- a/pynest/nest/plot/__init__.py +++ b/pynest/nest/plot/__init__.py @@ -19,8 +19,7 @@ # You should have received a copy of the GNU General Public License # along with NEST. If not, see . 
-from ._visualization import plot_network -from ._raster_plot import extract_events, raster_plot -from ._voltage_trace import from_file, from_device +from .visualization import plot_network +from .raster_plot import extract_events, from_device, from_file -__all__ = ["extract_events", "raster_plot", "from_device", "from_file", "plot_network"] +__all__ = ["extract_events", "from_device", "from_file", "plot_network"] From 12874ebba0ef492d58f4f5332f4f5ee6a75b4243 Mon Sep 17 00:00:00 2001 From: Robin De Schepper Date: Thu, 14 Sep 2023 23:34:12 +0200 Subject: [PATCH 13/17] reset pynest after bad rebase --- pynest/nest/__init__.py | 19 +- pynest/nest/lib/hl_api_connection_helpers.py | 104 +-- pynest/nest/lib/hl_api_connections.py | 96 +-- pynest/nest/lib/hl_api_exceptions.py | 132 ++-- pynest/nest/lib/hl_api_helper.py | 216 +++--- pynest/nest/lib/hl_api_info.py | 109 +-- pynest/nest/lib/hl_api_models.py | 13 +- pynest/nest/lib/hl_api_nodes.py | 54 +- pynest/nest/lib/hl_api_parallel_computing.py | 38 +- pynest/nest/lib/hl_api_simulation.py | 49 +- pynest/nest/lib/hl_api_sonata.py | 666 +++++++++++++++++++ pynest/nest/lib/hl_api_spatial.py | 369 +++++----- pynest/nest/lib/hl_api_types.py | 392 ++++++----- pynest/nest/logic/hl_api_logic.py | 12 +- pynest/nest/random/__init__.py | 2 +- pynest/nest/raster_plot.py | 338 ++++++++++ pynest/nest/versionchecker.py.in | 33 + pynest/nest/visualization.py | 83 +++ pynest/nest/voltage_trace.py | 265 ++++++++ 19 files changed, 2121 insertions(+), 869 deletions(-) create mode 100644 pynest/nest/lib/hl_api_sonata.py create mode 100644 pynest/nest/raster_plot.py create mode 100644 pynest/nest/versionchecker.py.in create mode 100644 pynest/nest/visualization.py create mode 100644 pynest/nest/voltage_trace.py diff --git a/pynest/nest/__init__.py b/pynest/nest/__init__.py index 80cba88aae..9bb1b9a59f 100644 --- a/pynest/nest/__init__.py +++ b/pynest/nest/__init__.py @@ -68,6 +68,7 @@ except ImportError: pass + class NestModule(types.ModuleType): """ A module class for the ``nest`` root module to control the dynamic generation @@ -89,16 +90,16 @@ def __init__(self, name): self.__dict__.update(_original_module_attrs) # noqa # Import public APIs of submodules into the `nest.` namespace - _rel_import_star(self, ".lib._hl_api_connections") - _rel_import_star(self, ".lib._hl_api_exceptions") - _rel_import_star(self, ".lib._hl_api_info") - _rel_import_star(self, ".lib._hl_api_models") - _rel_import_star(self, ".lib._hl_api_nodes") - _rel_import_star(self, ".lib._hl_api_parallel_computing") - _rel_import_star(self, ".lib._hl_api_simulation") - _rel_import_star(self, ".lib._hl_api_spatial") - _rel_import_star(self, ".lib._hl_api_types") + _rel_import_star(self, ".lib.hl_api_connections") # noqa: F821 + _rel_import_star(self, ".lib.hl_api_exceptions") # noqa: F821 + _rel_import_star(self, ".lib.hl_api_info") # noqa: F821 + _rel_import_star(self, ".lib.hl_api_models") # noqa: F821 + _rel_import_star(self, ".lib.hl_api_nodes") # noqa: F821 + _rel_import_star(self, ".lib.hl_api_parallel_computing") # noqa: F821 + _rel_import_star(self, ".lib.hl_api_simulation") # noqa: F821 _rel_import_star(self, ".lib.hl_api_sonata") # noqa: F821 + _rel_import_star(self, ".lib.hl_api_spatial") # noqa: F821 + _rel_import_star(self, ".lib.hl_api_types") # noqa: F821 # Lazy loaded modules. 
They are descriptors, so add them to the type object
         type(self).raster_plot = _lazy_module_property("raster_plot")  # noqa: F821
diff --git a/pynest/nest/lib/hl_api_connection_helpers.py b/pynest/nest/lib/hl_api_connection_helpers.py
index c879f76d13..6c119d7c72 100644
--- a/pynest/nest/lib/hl_api_connection_helpers.py
+++ b/pynest/nest/lib/hl_api_connection_helpers.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 #
-# _hl_api_connection_helpers.py
+# hl_api_connection_helpers.py
 #
 # This file is part of NEST.
 #
@@ -25,13 +25,13 @@
 """
 
 import copy
+
 import numpy as np
 
-from .._ll_api import *
-from .. import pynestkernel as kernel
 from .. import nestkernel_api as nestkernel
-from ._hl_api_types import CollocatedSynapses, Mask, NodeCollection, Parameter
-from ._hl_api_exceptions import NESTErrors
+from ..ll_api import *
+from .hl_api_exceptions import NESTError, NESTErrors
+from .hl_api_types import CollocatedSynapses, Mask, NodeCollection, Parameter
 
 __all__ = [
     "_connect_layers_needed",
@@ -74,10 +74,8 @@ def _process_syn_spec(syn_spec, conn_spec, prelength, postlength, use_connect_ar
     rule = conn_spec["rule"]
 
     if isinstance(syn_spec, dict):
-        if "synapse_model" in syn_spec and not isinstance(
-            syn_spec["synapse_model"], str
-        ):
-            raise kernel.NESTError("'synapse_model' must be a string")
+        if "synapse_model" in syn_spec and not isinstance(syn_spec["synapse_model"], str):
+            raise NESTErrors.NESTError("'synapse_model' must be a string")
         for key, value in syn_spec.items():
             # if value is a list, it is converted to a numpy array
             if isinstance(value, (list, tuple)):
@@ -88,13 +86,11 @@ def _process_syn_spec(syn_spec, conn_spec, prelength, postlength, use_connect_ar
                 if rule == "one_to_one":
                     if value.shape[0] != prelength:
                         if use_connect_arrays:
-                            raise kernel.NESTError(
-                                "'{}' has to be an array of dimension {}.".format(
-                                    key, prelength
-                                )
+                            raise NESTErrors.NESTError(
+                                "'{}' has to be an array of dimension {}.".format(key, prelength)
                             )
                         else:
-                            raise kernel.NESTError(
+                            raise NESTErrors.NESTError(
                                 "'{}' has to be an array of dimension {}, a scalar or a dictionary.".format(
                                     key, prelength
                                 )
@@ -103,7 +99,7 @@ def _process_syn_spec(syn_spec, conn_spec, prelength, postlength, use_connect_ar
                     syn_spec[key] = value
                 elif rule == "fixed_total_number":
                     if "N" in conn_spec and value.shape[0] != conn_spec["N"]:
-                        raise kernel.NESTError(
+                        raise NESTErrors.NESTError(
                             "'{}' has to be an array of dimension {}, a scalar or a dictionary".format(
                                 key, conn_spec["N"]
                             )
@@ -111,17 +107,15 @@ def _process_syn_spec(syn_spec, conn_spec, prelength, postlength, use_connect_ar
                     else:
                         syn_spec[key] = value
                 else:
-                    raise kernel.NESTError(
+                    raise NESTErrors.NESTError(
                         "'{}' has the wrong type. 
One-dimensional parameter arrays can only be used in "
-                        "conjunction with rule 'one_to_one' or 'fixed_total_number'.".format(
-                            key
-                        )
+                        "conjunction with rule 'one_to_one' or 'fixed_total_number'.".format(key)
                     )
 
             elif len(value.shape) == 2:
                 if rule == "all_to_all":
                     if value.shape[0] != postlength or value.shape[1] != prelength:
-                        raise kernel.NESTError(
+                        raise NESTErrors.NESTError(
                             "'{}' has to be an array of dimension {}x{} (n_target x n_sources), a scalar "
                             "or a dictionary.".format(key, postlength, prelength)
                         )
                     else:
                         syn_spec[key] = value.flatten()
                 elif rule == "fixed_indegree":
                     indegree = conn_spec["indegree"]
                     if value.shape[0] != postlength or value.shape[1] != indegree:
-                        raise kernel.NESTError(
+                        raise NESTErrors.NESTError(
                             "'{}' has to be an array of dimension {}x{} (n_target x indegree), a scalar "
                             "or a dictionary.".format(key, postlength, indegree)
                         )
                     else:
                         syn_spec[key] = value.flatten()
                 elif rule == "fixed_outdegree":
                     outdegree = conn_spec["outdegree"]
                     if value.shape[0] != prelength or value.shape[1] != outdegree:
-                        raise kernel.NESTError(
+                        raise NESTErrors.NESTError(
                             "'{}' has to be an array of dimension {}x{} (n_sources x outdegree), a scalar "
                             "or a dictionary.".format(key, prelength, outdegree)
                         )
                     else:
                         syn_spec[key] = value.flatten()
                 else:
-                    raise kernel.NESTError(
+                    raise NESTErrors.NESTError(
                         "'{}' has the wrong type. Two-dimensional parameter arrays can only be used in "
-                        "conjunction with rules 'all_to_all', 'fixed_indegree' or fixed_outdegree'.".format(
-                            key
-                        )
+                        "conjunction with rules 'all_to_all', 'fixed_indegree' or 'fixed_outdegree'.".format(key)
                     )
 
     # check that "synapse_model" is there for use_connect_arrays
@@ -179,20 +171,10 @@ def _process_spatial_projections(conn_spec, syn_spec):
         "use_on_source",
         "allow_oversized_mask",
     ]
-    allowed_syn_spec_keys = [
-        "weight",
-        "delay",
-        "synapse_model",
-        "synapse_label",
-        "receptor_type",
-    ]
+    allowed_syn_spec_keys = ["weight", "delay", "synapse_model", "synapse_label", "receptor_type"]
     for key in conn_spec.keys():
         if key not in allowed_conn_spec_keys:
-            raise ValueError(
-                "'{}' is not allowed in conn_spec when connecting with mask or kernel".format(
-                    key
-                )
-            )
+            raise ValueError("'{}' is not allowed in conn_spec when connecting with mask or kernel".format(key))
 
     projections = {}
     projections.update(conn_spec)
@@ -204,32 +186,22 @@ def _process_spatial_projections(conn_spec, syn_spec):
         for key in syn_list.keys():
             if key not in allowed_syn_spec_keys:
                 raise ValueError(
-                    "'{}' is not allowed in syn_spec when connecting with mask or kernel".format(
-                        key
-                    )
+                    "'{}' is not allowed in syn_spec when connecting with mask or kernel".format(key)
                 )
         projections.update({"synapse_parameters": syn_spec.syn_specs})
     else:
         for key in syn_spec.keys():
             if key not in allowed_syn_spec_keys:
-                raise ValueError(
-                    "'{}' is not allowed in syn_spec when connecting with mask or kernel".format(
-                        key
-                    )
-                )
+                raise ValueError("'{}' is not allowed in syn_spec when connecting with mask or kernel".format(key))
         projections.update(syn_spec)
 
     if conn_spec["rule"] == "fixed_indegree":
         if "use_on_source" in conn_spec:
-            raise ValueError(
-                "'use_on_source' can only be set when using pairwise_bernoulli"
-            )
+            raise ValueError("'use_on_source' can only be set when using pairwise_bernoulli")
         projections["connection_type"] = "pairwise_bernoulli_on_source"
         projections["number_of_connections"] = projections.pop("indegree")
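+        # The requested indegree is realized by the spatial backend as a
+        # "pairwise_bernoulli_on_source" connection with "number_of_connections"
+        # set to the indegree; the fixed_outdegree branch below is analogous.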
 elif conn_spec["rule"] == "fixed_outdegree":
         if "use_on_source" in conn_spec:
-            raise ValueError(
-                "'use_on_source' can only be set when using pairwise_bernoulli"
-            )
+            raise ValueError("'use_on_source' can only be set when using pairwise_bernoulli")
         projections["connection_type"] = "pairwise_bernoulli_on_target"
         projections["number_of_connections"] = projections.pop("outdegree")
     elif conn_spec["rule"] == "pairwise_bernoulli":
@@ -241,7 +213,7 @@ def _process_spatial_projections(conn_spec, syn_spec):
         if "use_on_source" in projections:
             projections.pop("use_on_source")
     else:
-        raise kernel.NESTError(
+        raise NESTErrors.NESTError(
             "When using kernel or mask, the only possible connection rules are "
             "'pairwise_bernoulli', 'fixed_indegree', or 'fixed_outdegree'"
         )
@@ -258,11 +230,7 @@ def _connect_layers_needed(conn_spec, syn_spec):
         return True
     # We must use ConnectLayers in some additional cases.
     rule_is_bernoulli = "pairwise_bernoulli" in str(conn_spec["rule"])
-    if (
-        "mask" in conn_spec
-        or ("p" in conn_spec and not rule_is_bernoulli)
-        or "use_on_source" in conn_spec
-    ):
+    if "mask" in conn_spec or ("p" in conn_spec and not rule_is_bernoulli) or "use_on_source" in conn_spec:
         return True
     # If a syn_spec entry is based on spatial properties, we must use ConnectLayers.
     if isinstance(syn_spec, dict):
@@ -270,18 +238,13 @@ def _connect_layers_needed(conn_spec, syn_spec):
             if isinstance(item, Parameter) and item.is_spatial():
                 return True
     elif isinstance(syn_spec, CollocatedSynapses):
-        return any(
-            [
-                _connect_layers_needed(conn_spec, syn_param)
-                for syn_param in syn_spec.syn_specs
-            ]
-        )
+        return any([_connect_layers_needed(conn_spec, syn_param) for syn_param in syn_spec.syn_specs])
     # If we get here, there is not need to use ConnectLayers.
     return False
 
 
 def _connect_spatial(pre, post, projections):
-    """Connect `pre` to `post` using the specifications in `projections`."""
+    """Connect ``pre`` to ``post`` using the specifications in ``projections``."""
 
     def fixdict(d):
         for k, v in d.items():
@@ -313,9 +276,7 @@ def _process_input_nodes(pre, post, conn_spec):
 
     # check for 'one_to_one' conn_spec
     one_to_one_cspec = (
-        conn_spec
-        if not isinstance(conn_spec, dict)
-        else conn_spec.get("rule", "all_to_all") == "one_to_one"
+        conn_spec if not isinstance(conn_spec, dict) else conn_spec.get("rule", "all_to_all") == "one_to_one"
    )
 
     # check and convert input types
@@ -338,8 +299,7 @@ def _process_input_nodes(pre, post, conn_spec):
     if not pre_is_nc or not post_is_nc:
         if len(pre) != len(post):
             raise NESTErrors.ArgumentType(
-                "Connect",
-                "If `pre` or `post` contain non-unique IDs, then they must have the same length.",
+                "Connect", "If `pre` or `post` contain non-unique IDs, then they must have the same length."
             )
 
     # convert to arrays
@@ -360,8 +320,6 @@ def _process_input_nodes(pre, post, conn_spec):
         use_connect_arrays = True
 
     if use_connect_arrays and not one_to_one_cspec:
-        raise ValueError(
-            "When connecting two arrays with non-unique IDs, `conn_spec` must be 'one_to_one'."
-        )
+        raise ValueError("When connecting two arrays with non-unique IDs, `conn_spec` must be 'one_to_one'.")
 
     return use_connect_arrays, pre, post
diff --git a/pynest/nest/lib/hl_api_connections.py b/pynest/nest/lib/hl_api_connections.py
index 17f7ba3909..ff89497182 100644
--- a/pynest/nest/lib/hl_api_connections.py
+++ b/pynest/nest/lib/hl_api_connections.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 #
-# _hl_api_connections.py
+# hl_api_connections.py
 #
 # This file is part of NEST.
# @@ -25,21 +25,18 @@ import numpy -from .._ll_api import connect_arrays -from .. import pynestkernel as kernel from .. import nestkernel_api as nestkernel - -from ._hl_api_connection_helpers import ( - _process_input_nodes, +from .hl_api_connection_helpers import ( _connect_layers_needed, _connect_spatial, _process_conn_spec, + _process_input_nodes, _process_spatial_projections, _process_syn_spec, ) -from ._hl_api_nodes import Create -from ._hl_api_parallel_computing import NumProcesses -from ._hl_api_types import NodeCollection, SynapseCollection, Mask, Parameter +from .hl_api_nodes import Create +from .hl_api_parallel_computing import NumProcesses +from .hl_api_types import Mask, NodeCollection, Parameter, SynapseCollection __all__ = [ "Connect", @@ -134,7 +131,7 @@ def Connect(pre, post, conn_spec=None, syn_spec=None, return_synapsecollection=F Raises ------ - kernel.NESTError + nestkernel.NESTError Notes ----- @@ -189,7 +186,7 @@ def Connect(pre, post, conn_spec=None, syn_spec=None, return_synapsecollection=F will be used. Distributed parameters can be defined through NEST's different parametertypes. NEST has various - random parameters, spatial parameters and distributions (only accesseable for nodes with spatial positions), + random parameters, spatial parameters and distributions (only accessible for nodes with spatial positions), logical expressions and mathematical expressions, which can be used to define node and connection parameters. To see all available parameters, see documentation defined in distributions, logic, math, @@ -199,23 +196,20 @@ def Connect(pre, post, conn_spec=None, syn_spec=None, return_synapsecollection=F --------- :ref:`connection_management` """ + use_connect_arrays, pre, post = _process_input_nodes(pre, post, conn_spec) # Converting conn_spec to dict, without putting it on the SLI stack. processed_conn_spec = _process_conn_spec(conn_spec) # If syn_spec is given, its contents are checked, and if needed converted # to the right formats. - processed_syn_spec = _process_syn_spec( - syn_spec, processed_conn_spec, len(pre), len(post), use_connect_arrays - ) + processed_syn_spec = _process_syn_spec(syn_spec, processed_conn_spec, len(pre), len(post), use_connect_arrays) # If pre and post are arrays of node IDs, and conn_spec is unspecified, # the node IDs are connected one-to-one. 
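+    # Each entry pre[i] is then paired with post[i], and array-valued syn_spec
+    # entries (e.g. "weight", "delay") are applied connection-by-connection.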
if use_connect_arrays: if return_synapsecollection: - raise ValueError( - "SynapseCollection cannot be returned when connecting two arrays of node IDs" - ) + raise ValueError("SynapseCollection cannot be returned when connecting two arrays of node IDs") if processed_syn_spec is None: raise ValueError( @@ -229,16 +223,8 @@ def Connect(pre, post, conn_spec=None, syn_spec=None, return_synapsecollection=F if "delays" in processed_syn_spec: raise ValueError("To specify delays, use 'delay' in syn_spec.") - weights = ( - numpy.array(processed_syn_spec["weight"]) - if "weight" in processed_syn_spec - else None - ) - delays = ( - numpy.array(processed_syn_spec["delay"]) - if "delay" in processed_syn_spec - else None - ) + weights = numpy.array(processed_syn_spec["weight"]) if "weight" in processed_syn_spec else None + delays = numpy.array(processed_syn_spec["delay"]) if "delay" in processed_syn_spec else None try: synapse_model = processed_syn_spec["synapse_model"] @@ -251,15 +237,11 @@ def Connect(pre, post, conn_spec=None, syn_spec=None, return_synapsecollection=F # Split remaining syn_spec entries to key and value arrays reduced_processed_syn_spec = { k: processed_syn_spec[k] - for k in set(processed_syn_spec.keys()).difference( - set(("weight", "delay", "synapse_model")) - ) + for k in set(processed_syn_spec.keys()).difference(set(("weight", "delay", "synapse_model"))) } if len(reduced_processed_syn_spec) > 0: - syn_param_keys = numpy.array( - list(reduced_processed_syn_spec.keys()), dtype=numpy.string_ - ) + syn_param_keys = numpy.array(list(reduced_processed_syn_spec.keys()), dtype=numpy.string_) syn_param_values = numpy.zeros([len(reduced_processed_syn_spec), len(pre)]) for i, value in enumerate(reduced_processed_syn_spec.values()): @@ -268,9 +250,7 @@ def Connect(pre, post, conn_spec=None, syn_spec=None, return_synapsecollection=F syn_param_keys = None syn_param_values = None - connect_arrays( - pre, post, weights, delays, synapse_model, syn_param_keys, syn_param_values - ) + nestkernel.ll_api_connect_arrays(pre, post, weights, delays, synapse_model, syn_param_keys, syn_param_values) return if not isinstance(pre, NodeCollection): @@ -287,21 +267,17 @@ def Connect(pre, post, conn_spec=None, syn_spec=None, return_synapsecollection=F raise TypeError("Presynaptic NodeCollection must have spatial information") # Create the projection dictionary - spatial_projections = _process_spatial_projections( - processed_conn_spec, processed_syn_spec - ) + spatial_projections = _process_spatial_projections(processed_conn_spec, processed_syn_spec) _connect_spatial(pre._datum, post._datum, spatial_projections) else: - nestkernel.llapi_connect( - pre._datum, post._datum, processed_conn_spec, processed_syn_spec - ) + nestkernel.llapi_connect(pre._datum, post._datum, processed_conn_spec, processed_syn_spec) if return_synapsecollection: return GetConnections(pre, post) def Disconnect(*args, conn_spec=None, syn_spec=None): - """Disconnect connections in a SynnapseCollection, or `pre` neurons from `post` neurons. + """Disconnect connections in a SynapseCollection, or `pre` neurons from `post` neurons. When specifying `pre` and `post` nodes, they are disconnected using the specified disconnection rule (one-to-one by default) and synapse type (:cpp:class:`static_synapse ` by default). 
@@ -310,7 +286,7 @@ def Disconnect(*args, conn_spec=None, syn_spec=None): Parameters ---------- args : SynapseCollection or NodeCollections - Either a collection of connections to disconnect, or pre- and postsynaptic nodes given as `NodeCollection`s + Either a collection of connections to disconnect, or pre- and postsynaptic nodes given as NodeCollections conn_spec : str or dict Disconnection rule when specifying pre- and postsynaptic nodes, see below syn_spec : str or dict @@ -324,9 +300,9 @@ def Disconnect(*args, conn_spec=None, syn_spec=None): Apply the same rules as for connectivity specs in the :py:func:`.Connect` method Possible choices of the conn_spec are - :: - - 'one_to_one' - - 'all_to_all' + + - 'one_to_one' + - 'all_to_all' **syn_spec** @@ -364,33 +340,21 @@ def Disconnect(*args, conn_spec=None, syn_spec=None): if len(args) == 1: synapsecollection = args[0] if not isinstance(synapsecollection, SynapseCollection): - raise TypeError( - "Arguments must be either a SynapseCollection or two NodeCollections" - ) + raise TypeError("Arguments must be either a SynapseCollection or two NodeCollections") if conn_spec is not None or syn_spec is not None: - raise ValueError( - "When disconnecting with a SynapseCollection, conn_spec and syn_spec cannot be specified" - ) + raise ValueError("When disconnecting with a SynapseCollection, conn_spec and syn_spec cannot be specified") synapsecollection.disconnect() elif len(args) == 2: # Fill default values conn_spec = "one_to_one" if conn_spec is None else conn_spec syn_spec = "static_synapse" if syn_spec is None else syn_spec - if is_string(conn_spec): + if isinstance(conn_spec, str): conn_spec = {"rule": conn_spec} - if is_string(syn_spec): + if isinstance(syn_spec, str): syn_spec = {"synapse_model": syn_spec} pre, post = args if not isinstance(pre, NodeCollection) or not isinstance(post, NodeCollection): - raise TypeError( - "Arguments must be either a SynapseCollection or two NodeCollections" - ) - sps(pre) - sps(post) - sps(conn_spec) - sps(syn_spec) - sr("Disconnect_g_g_D_D") + raise TypeError("Arguments must be either a SynapseCollection or two NodeCollections") + nestkernel.llapi_disconnect(pre._datum, post._datum, conn_spec, syn_spec) else: - raise TypeError( - "Arguments must be either a SynapseCollection or two NodeCollections" - ) + raise TypeError("Arguments must be either a SynapseCollection or two NodeCollections") diff --git a/pynest/nest/lib/hl_api_exceptions.py b/pynest/nest/lib/hl_api_exceptions.py index cadd5797f9..0dbfefa1d1 100644 --- a/pynest/nest/lib/hl_api_exceptions.py +++ b/pynest/nest/lib/hl_api_exceptions.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# _hl_api_exceptions.py +# hl_api_exceptions.py # # This file is part of NEST. # @@ -27,7 +27,7 @@ class NESTMappedException(type): below gets called, creating a class with that name (the error name) and with an __init__ taking commandname and errormessage (as created in the source) which is a closure on the parent and errorname as well, with a parent of default type (self.default_parent) or - self.parents[errorname] if defined. """ + self.parents[errorname] if defined.""" def __getattr__(cls, errorname): """Creates a class of type "errorname" which is a child of cls.default_parent or @@ -35,7 +35,7 @@ def __getattr__(cls, errorname): This __getattr__ function also stores the class permanently as an attribute of cls for re-use where cls is actually the class that triggered the getattr (the class that - NESTMappedException is a metaclass of). 
""" + NESTMappedException is a metaclass of).""" # Dynamic class construction, first check if we know its parent if errorname in cls.parents: @@ -47,17 +47,18 @@ def __getattr__(cls, errorname): # not NESTMappedException, since that would mean the metaclass would let the new class inherit # this __getattr__, allowing unintended dynamic construction of attributes newclass = type( - cls.__name__ + '.' + errorname, + cls.__name__ + "." + errorname, (parent,), { - '__init__': cls.init(parent, errorname), - '__doc__': - """Dynamically created exception {} from {}. + "__init__": cls.init(parent, errorname), + "__doc__": """Dynamically created exception {} from {}. Created for the namespace: {}. Parent exception: {}. - """.format(errorname, cls.source, cls.__name__, parent.__name__) - } + """.format( + errorname, cls.source, cls.__name__, parent.__name__ + ), + }, ) # Cache for reuse: __getattr__ should now not get called if requested again @@ -74,8 +75,7 @@ class NESTErrors(metaclass=NESTMappedException): """ class NESTError(Exception): - """Base exception class for all NEST exceptions. - """ + """Base exception class for all NEST exceptions.""" def __init__(self, message): """Initializer for NESTError base class. @@ -90,10 +90,9 @@ def __init__(self, message): self.message = message class SLIException(NESTError): - """Base class for all exceptions coming from sli. - """ + """Base class for all exceptions coming from sli.""" - def __init__(self, commandname, errormessage, errorname='SLIException'): + def __init__(self, commandname, errormessage, errorname="SLIException"): """Initialize function. Parameters: @@ -110,13 +109,13 @@ def __init__(self, commandname, errormessage, errorname='SLIException'): self.errormessage = errormessage class PyNESTError(NESTError): - """Exceptions produced from Python/Cython code. - """ + """Exceptions produced from Python/Cython code.""" + pass @staticmethod def init(parent, errorname): - """ Static class method to construct init's for SLIException children. + """Static class method to construct init's for SLIException children. Construct our new init with closure on errorname (as a default value) and parent. The default value allows the __init__ to be chained and set by the leaf child. @@ -136,8 +135,7 @@ def __init__(self, commandname, errormessage, errorname=errorname, *args, **kwar # recursively init the parent class: all of this is only needed to properly set errorname parent.__init__(self, commandname, errormessage, *args, errorname=errorname, **kwargs) - docstring = \ - """Initialization function. + docstring = """Initialization function. Parameters: ----------- @@ -148,7 +146,9 @@ def __init__(self, commandname, errormessage, errorname=errorname, *args, **kwar *args, **kwargs: passed through to base class. self will be a descendant of {}. 
- """.format(errorname, parent.__name__) + """.format( + errorname, parent.__name__ + ) try: __init__.__doc__ = docstring @@ -167,52 +167,52 @@ def __init__(self, commandname, errormessage, errorname=errorname, *args, **kwar source = "SLI" default_parent = SLIException parents = { - 'TypeMismatch': 'InterpreterError', - 'SystemSignal': 'InterpreterError', - 'RangeCheck': 'InterpreterError', - 'ArgumentType': 'InterpreterError', - 'BadParameterValue': 'SLIException', - 'DictError': 'InterpreterError', - 'UndefinedName': 'DictError', - 'EntryTypeMismatch': 'DictError', - 'StackUnderflow': 'InterpreterError', - 'IOError': 'SLIException', - 'UnaccessedDictionaryEntry': 'DictError', - 'UnknownModelName': 'KernelException', - 'NewModelNameExists': 'KernelException', - 'ModelInUse': 'KernelException', - 'UnknownSynapseType': 'KernelException', - 'UnknownNode': 'KernelException', - 'NoThreadSiblingsAvailable': 'KernelException', - 'LocalNodeExpected': 'KernelException', - 'NodeWithProxiesExpected': 'KernelException', - 'UnknownReceptorType': 'KernelException', - 'IncompatibleReceptorType': 'KernelException', - 'UnknownPort': 'KernelException', - 'IllegalConnection': 'KernelException', - 'InexistentConnection': 'KernelException', - 'UnknownThread': 'KernelException', - 'BadDelay': 'KernelException', - 'UnexpectedEvent': 'KernelException', - 'UnsupportedEvent': 'KernelException', - 'BadProperty': 'KernelException', - 'BadParameter': 'KernelException', - 'DimensionMismatch': 'KernelException', - 'DistributionError': 'KernelException', - 'InvalidDefaultResolution': 'KernelException', - 'InvalidTimeInModel': 'KernelException', - 'StepMultipleRequired': 'KernelException', - 'TimeMultipleRequired': 'KernelException', - 'GSLSolverFailure': 'KernelException', - 'NumericalInstability': 'KernelException', - 'KeyError': 'KernelException', - 'MUSICPortUnconnected': 'KernelException', - 'MUSICPortHasNoWidth': 'KernelException', - 'MUSICPortAlreadyPublished': 'KernelException', - 'MUSICSimulationHasRun': 'KernelException', - 'MUSICChannelUnknown': 'KernelException', - 'MUSICPortUnknown': 'KernelException', - 'MUSICChannelAlreadyMapped': 'KernelException' + "TypeMismatch": "InterpreterError", + "SystemSignal": "InterpreterError", + "RangeCheck": "InterpreterError", + "ArgumentType": "InterpreterError", + "BadParameterValue": "SLIException", + "DictError": "InterpreterError", + "UndefinedName": "DictError", + "EntryTypeMismatch": "DictError", + "StackUnderflow": "InterpreterError", + "IOError": "SLIException", + "UnaccessedDictionaryEntry": "DictError", + "UnknownModelName": "KernelException", + "NewModelNameExists": "KernelException", + "ModelInUse": "KernelException", + "UnknownSynapseType": "KernelException", + "UnknownNode": "KernelException", + "NoThreadSiblingsAvailable": "KernelException", + "LocalNodeExpected": "KernelException", + "NodeWithProxiesExpected": "KernelException", + "UnknownReceptorType": "KernelException", + "IncompatibleReceptorType": "KernelException", + "UnknownPort": "KernelException", + "IllegalConnection": "KernelException", + "InexistentConnection": "KernelException", + "UnknownThread": "KernelException", + "BadDelay": "KernelException", + "UnexpectedEvent": "KernelException", + "UnsupportedEvent": "KernelException", + "BadProperty": "KernelException", + "BadParameter": "KernelException", + "DimensionMismatch": "KernelException", + "DistributionError": "KernelException", + "InvalidDefaultResolution": "KernelException", + "InvalidTimeInModel": "KernelException", + 
"StepMultipleRequired": "KernelException", + "TimeMultipleRequired": "KernelException", + "GSLSolverFailure": "KernelException", + "NumericalInstability": "KernelException", + "KeyError": "KernelException", + "MUSICPortUnconnected": "KernelException", + "MUSICPortHasNoWidth": "KernelException", + "MUSICPortAlreadyPublished": "KernelException", + "MUSICSimulationHasRun": "KernelException", + "MUSICChannelUnknown": "KernelException", + "MUSICPortUnknown": "KernelException", + "MUSICChannelAlreadyMapped": "KernelException", } diff --git a/pynest/nest/lib/hl_api_helper.py b/pynest/nest/lib/hl_api_helper.py index 425bbe6b38..3034b5c8ba 100644 --- a/pynest/nest/lib/hl_api_helper.py +++ b/pynest/nest/lib/hl_api_helper.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# _hl_api_helper.py +# hl_api_helper.py # # This file is part of NEST. # @@ -24,59 +24,45 @@ API of the PyNEST wrapper. """ -import warnings -import json import functools -import textwrap -import subprocess +import json import os -import re -import shlex -import sys -import numpy import pydoc - +import textwrap +import warnings from string import Template -from .. import pynestkernel as kernel -from .. import nestkernel_api as nestkernel import nest +from .. import nestkernel_api as nestkernel + __all__ = [ - 'broadcast', - 'deprecated', - 'get_parameters', - 'get_parameters_hierarchical_addressing', - 'get_wrapped_text', - 'is_coercible_to_sli_array', - 'is_iterable', - 'is_sequence_of_connections', - 'is_sequence_of_node_ids', - 'load_help', - 'model_deprecation_warning', - 'restructure_data', - 'show_deprecation_warning', - 'show_help_with_pager', - 'SuppressedDeprecationWarning', - 'uni_str', + "broadcast", + "deprecated", + "get_parameters", + "get_parameters_hierarchical_addressing", + "get_wrapped_text", + "is_iterable", + "load_help", + "model_deprecation_warning", + "restructure_data", + "show_deprecation_warning", + "show_help_with_pager", + "stringify_path", # TODO PYNEST-NG: remove? + "SuppressedDeprecationWarning", ] # These flags are used to print deprecation warnings only once. # Only flags for special cases need to be entered here, such as special models # or function parameters, all flags for deprecated functions will be registered # by the @deprecated decorator, and therefore does not manually need to be placed here. 
-_deprecation_warning = {'deprecated_model': {'deprecation_issued': False,
-                                             'replacement': 'replacement_mod'},
-                        'iaf_psc_alpha_canon': {'deprecation_issued': False,
-                                                'replacement': 'iaf_psc_alpha_ps'},
-                        'pp_pop_psc_delta': {'deprecation_issued': False,
-                                             'replacement': 'gif_pop_psc_exp'}}
+_deprecation_warning = {"deprecated_model": {"deprecation_issued": False, "replacement": "replacement_mod"}}
 
 
 def format_Warning(message, category, filename, lineno, line=None):
     """Formats deprecation warning."""
-    return '%s:%s: %s:%s\n' % (filename, lineno, category.__name__, message)
+    return "%s:%s: %s:%s\n" % (filename, lineno, category.__name__, message)
 
 
 warnings.formatwarning = format_Warning
@@ -115,14 +101,15 @@ def show_deprecation_warning(func_name, alt_func_name=None, text=None):
         Text to display instead of standard text
     """
     if func_name in _deprecation_warning:
-        if not _deprecation_warning[func_name]['deprecation_issued']:
+        if not _deprecation_warning[func_name]["deprecation_issued"]:
             if text is None:
-                text = ("{0} is deprecated and will be removed in a future version of NEST.\n"
-                        "Please use {1} instead!").format(func_name, alt_func_name)
+                text = (
+                    "{0} is deprecated and will be removed in a future version of NEST.\n" "Please use {1} instead!"
+                ).format(func_name, alt_func_name)
                 text = get_wrapped_text(text)
 
-            warnings.warn('\n' + text)  # add LF so text starts on new line
-            _deprecation_warning[func_name]['deprecation_issued'] = True
+            warnings.warn("\n" + text)  # add LF so text starts on new line
+            _deprecation_warning[func_name]["deprecation_issued"] = True
 
 
 # Since we need to pass extra arguments to the decorator, we need a
@@ -146,64 +133,45 @@ def deprecated(alt_func_name, text=None):
     """
 
     def deprecated_decorator(func):
-        _deprecation_warning[func.__name__] = {'deprecation_issued': False}
+        _deprecation_warning[func.__name__] = {"deprecation_issued": False}
 
         @functools.wraps(func)
         def new_func(*args, **kwargs):
             show_deprecation_warning(func.__name__, alt_func_name, text=text)
             return func(*args, **kwargs)
+
         return new_func
 
     return deprecated_decorator
 
 
-def is_iterable(seq):
-    """Return True if the given object is an iterable, False otherwise.
-
-    Parameters
-    ----------
-    seq : object
-        Object to check
-
-    Returns
-    -------
-    bool:
-        True if object is an iterable
+def stringify_path(filepath):
     """
+    Convert a path-like object to string form.
 
-    try:
-        iter(seq)
-    except TypeError:
-        return False
-
-    return True
-
-
-def is_coercible_to_sli_array(seq):
-    """Checks whether a given object is coercible to a SLI array
+    Attempt to convert a path-like object to a string by calling the
+    ``__fspath__`` method of objects supporting the fspath protocol. Anything
+    else, including ``str`` and ``bytes`` objects, is passed through unchanged.
 
     Parameters
     ----------
-    seq : object
-        Object to check
+    filepath : object
+        Object representing file system path.
 
     Returns
     -------
-    bool:
-        True if object is coercible to a SLI array
+    filepath : str
+        Stringified filepath.
     """
-
-    import sys
-
-    if sys.version_info[0] >= 3:
-        return isinstance(seq, (tuple, list, range))
-    else:
-        return isinstance(seq, (tuple, list, xrange))
+    if isinstance(filepath, os.PathLike):
+        filepath = filepath.__fspath__()  # should return a str or bytes object
+    return filepath
 
 
-def is_sequence_of_connections(seq):
-    """Checks whether low-level API accepts seq as a sequence of
-    connections.
+def is_iterable(seq):
+    """Return True if the given object is an iterable, False otherwise.
Parameters ---------- @@ -213,35 +181,15 @@ def is_sequence_of_connections(seq): Returns ------- bool: - True if object is an iterable of dictionaries or - subscriptables of CONN_LEN + True if object is an iterable """ try: - cnn = next(iter(seq)) - return isinstance(cnn, dict) or len(cnn) == kernel.CONN_LEN + iter(seq) except TypeError: - pass - - return False - - -def is_sequence_of_node_ids(seq): - """Checks whether the argument is a potentially valid sequence of - node IDs (non-negative integers). - - Parameters - ---------- - seq : object - Object to check - - Returns - ------- - bool: - True if object is a potentially valid sequence of node IDs - """ + return False - return all(isinstance(n, int) and n >= 0 for n in seq) + return True def broadcast(item, length, allowed_types, name="item"): @@ -271,12 +219,13 @@ def broadcast(item, length, allowed_types, name="item"): """ if isinstance(item, allowed_types): - return length * (item, ) + return length * (item,) elif len(item) == 1: return length * item elif len(item) != length: raise TypeError( - "'{0}' must be a single value, a list with one element or a list with {1} elements.".format(name, length)) + "'{0}' must be a single value, a list with one element or a list with {1} elements.".format(name, length) + ) return item @@ -292,9 +241,12 @@ def __show_help_in_modal_window(obj, help_text): """ help_text = json.dumps(help_text) - style = "" - s = Template(""" + style = ( + "" + ) + s = Template( + """ require( ["base/js/dialog"], function(dialog) { @@ -307,9 +259,11 @@ def __show_help_in_modal_window(obj, help_text): }); } ); - """) + """ + ) from IPython.display import HTML, Javascript, display + display(HTML(style)) display(Javascript(s.substitute(jstitle=obj, jstext=help_text))) @@ -330,8 +284,8 @@ def get_help_fname(obj): File name of the help text for obj """ - docdir = sli_func("statusdict/prgdocdir ::") - help_fname = os.path.join(docdir, 'html', 'models', f'{obj}.rst') + docdir = nestkernel.ll_api_get_kernel_status()["docdir"] + help_fname = os.path.join(docdir, "html", "models", f"{obj}.rst") if os.path.isfile(help_fname): return help_fname @@ -354,7 +308,7 @@ def load_help(obj): """ help_fname = get_help_fname(obj) - with open(help_fname, 'r', encoding='utf-8') as help_file: + with open(help_fname, "r", encoding="utf-8") as help_file: help_text = help_file.read() return help_text @@ -374,14 +328,14 @@ def show_help_with_pager(obj): def check_nb(): try: - return get_ipython().__class__.__name__.startswith('ZMQ') + return get_ipython().__class__.__name__.startswith("ZMQ") except NameError: return False help_text = load_help(obj) if check_nb(): - __show_help_in_modal_window(obj + '.rst', help_text) + __show_help_in_modal_window(obj + ".rst", help_text) return pydoc.pager(help_text) @@ -405,9 +359,10 @@ def model_deprecation_warning(model): """ if model in _deprecation_warning: - if not _deprecation_warning[model]['deprecation_issued']: - text = ("The {0} model is deprecated and will be removed in a future version of NEST, " - "use {1} instead.").format(model, _deprecation_warning[model]['replacement']) + if not _deprecation_warning[model]["deprecation_issued"]: + text = ( + "The {0} model is deprecated and will be removed in a future version of NEST, " "use {1} instead." 
+ ).format(model, _deprecation_warning[model]["replacement"]) show_deprecation_warning(model, text=text) @@ -437,15 +392,15 @@ def restructure_data(result, keys): final_result.append(result_dict[keys]) elif keys in all_keys: final_result.append(None) - final_result = tuple(final_result) else: final_result = result[0][keys] elif is_iterable(keys): - final_result = ({key: [val[i] for val in result] - for i, key in enumerate(keys)} if len(result) != 1 - else {key: val[i] for val in result - for i, key in enumerate(keys)}) + final_result = ( + {key: [val[i] for val in result] for i, key in enumerate(keys)} + if len(result) != 1 + else {key: val[i] for val in result for i, key in enumerate(keys)} + ) elif keys is None: if len(result) != 1: @@ -524,10 +479,10 @@ def get_parameters_hierarchical_addressing(nc, params): # or list of strings. if isinstance(params[0], str): value_list = nc.get(params[0]) - if type(value_list) != tuple: + if not isinstance(value_list, tuple): value_list = (value_list,) else: - raise TypeError('First argument must be a string, specifying path into hierarchical dictionary') + raise TypeError("First argument must be a string, specifying path into hierarchical dictionary") result = restructure_data(value_list, None) @@ -556,27 +511,22 @@ def __init__(self, no_dep_funcs): for which to suppress deprecation warnings """ - self._no_dep_funcs = (no_dep_funcs if not isinstance(no_dep_funcs, str) else (no_dep_funcs, )) + self._no_dep_funcs = no_dep_funcs if not isinstance(no_dep_funcs, str) else (no_dep_funcs,) self._deprecation_status = {} - sr('verbosity') # Use sli-version as we cannon import from info because of cirular inclusion problem - self._verbosity_level = spp() + self._verbosity_level = nestkernel.get_verbosity() def __enter__(self): - for func_name in self._no_dep_funcs: self._deprecation_status[func_name] = _deprecation_warning[func_name] # noqa - _deprecation_warning[func_name]['deprecation_issued'] = True + _deprecation_warning[func_name]["deprecation_issued"] = True # Suppress only if verbosity level is deprecated or lower - if self._verbosity_level <= sli_func('M_DEPRECATED'): - # Use sli-version as we cannon import from info because of cirular inclusion problem - sr("{} setverbosity".format(sli_func('M_WARNING'))) + if self._verbosity_level <= nestkernel.severity_t.M_DEPRECATED: + nestkernel.set_verbosity(nestkernel.severity_t.M_WARNING) def __exit__(self, *args): - # Reset the verbosity level and deprecation warning status - sr("{} setverbosity".format((self._verbosity_level))) + nestkernel.set_verbosity(self._verbosity_level) for func_name, deprec_dict in self._deprecation_status.items(): - _deprecation_warning[func_name]['deprecation_issued'] = ( - deprec_dict['deprecation_issued']) + _deprecation_warning[func_name]["deprecation_issued"] = deprec_dict["deprecation_issued"] diff --git a/pynest/nest/lib/hl_api_info.py b/pynest/nest/lib/hl_api_info.py index 3b9a0c8d1d..179f85768d 100644 --- a/pynest/nest/lib/hl_api_info.py +++ b/pynest/nest/lib/hl_api_info.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# _hl_api_info.py +# hl_api_info.py # # This file is part of NEST. # @@ -23,42 +23,25 @@ Functions to get information on NEST. """ -import sys import os import textwrap import webbrowser -from ._hl_api_helper import broadcast, is_iterable, load_help, show_help_with_pager -from ._hl_api_types import to_json -from .. import nestkernel_api as nestkernel import nest +from .. 
import nestkernel_api as nestkernel +from .hl_api_helper import broadcast, is_iterable, load_help, show_help_with_pager +from .hl_api_types import to_json + __all__ = [ - 'authors', - 'get_argv', - 'get_verbosity', - 'help', - 'helpdesk', - 'message', - 'set_verbosity', - 'sysinfo', + "get_verbosity", + "help", + "helpdesk", + "set_verbosity", + "verbosity", ] - -def sysinfo(): - """Print information on the platform on which NEST was compiled. - - """ - - sr("sysinfo") - - -def authors(): - """Print the authors of NEST. - - """ - - sr("authors") +verbosity = nestkernel.severity_t def helpdesk(): @@ -73,8 +56,8 @@ def helpdesk(): """ - docdir = sli_func("statusdict/prgdocdir ::") - help_fname = os.path.join(docdir, 'html', 'index.html') + docdir = nestkernel.ll_api_get_kernel_status()["docdir"] + help_fname = os.path.join(docdir, "html", "index.html") if not os.path.isfile(help_fname): msg = "Sorry, the help index cannot be opened. " @@ -115,51 +98,17 @@ def help(obj=None, return_text=False): else: show_help_with_pager(obj) except FileNotFoundError: - print(textwrap.dedent(f""" + print( + textwrap.dedent( + f""" Sorry, there is no help for model '{obj}'. - Use the Python help() function to obtain help on PyNEST functions.""")) + Use the Python help() function to obtain help on PyNEST functions.""" + ) + ) else: print(nest.__doc__) -def get_argv(): - """Return argv as seen by NEST. - - This is similar to Python :code:`sys.argv` but might have changed after - MPI initialization. - - Returns - ------- - tuple - Argv, as seen by NEST - - """ - - sr('statusdict') - statusdict = spp() - return statusdict['argv'] - - -def message(level, sender, text): - """Print a message using message system of NEST. - - Parameters - ---------- - level : - Level - sender : - Message sender - text : str - Text to be sent in the message - - """ - - sps(level) - sps(sender) - sps(text) - sr('message') - - def get_verbosity(): """Return verbosity level of NEST's messages. @@ -172,12 +121,11 @@ def get_verbosity(): Returns ------- - int: + severity_t: The current verbosity level """ - sr('verbosity') - return spp() + return nestkernel.llapi_get_verbosity() def set_verbosity(level): @@ -198,14 +146,11 @@ def set_verbosity(level): Parameters ---------- - level : str, default: 'M_INFO' - Can be one of 'M_FATAL', 'M_ERROR', 'M_WARNING', 'M_DEPRECATED', - 'M_INFO' or 'M_ALL'. + level : severity_t, default: 'M_ALL' + Can be one of the values of the nest.verbosity enum. """ - # TODO-PYNEST-NG: There are no SLI messages anymore, so verbosity - # is now irrelevant and should be replaced when a - # replacement for message() exists. - - # sr("{} setverbosity".format(level)) - pass + if type(level) is not verbosity: + raise TypeError('"level" must be a value of the nest.verbosity enum.') + + nestkernel.llapi_set_verbosity(level) diff --git a/pynest/nest/lib/hl_api_models.py b/pynest/nest/lib/hl_api_models.py index 468bc7c7dc..ce3be14357 100644 --- a/pynest/nest/lib/hl_api_models.py +++ b/pynest/nest/lib/hl_api_models.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# _hl_api_models.py +# hl_api_models.py # # This file is part of NEST. # @@ -23,10 +23,10 @@ Functions for model handling """ -from .._ll_api import * from .. 
import nestkernel_api as nestkernel -from ._hl_api_helper import deprecated, is_iterable, model_deprecation_warning -from ._hl_api_types import to_json +from ..ll_api import * +from .hl_api_helper import deprecated, is_iterable, model_deprecation_warning +from .hl_api_types import to_json __all__ = [ "ConnectionRules", @@ -38,7 +38,6 @@ @deprecated("nest.node_models or nest.synapse_models") -@check_stack def Models(mtype="all", sel=None): """Return a tuple of neuron, device, or synapse model names. @@ -90,7 +89,6 @@ def Models(mtype="all", sel=None): @deprecated("nest.connection_rules") -@check_stack def ConnectionRules(): """Return a tuple of all available connection rules, sorted by name. @@ -104,7 +102,6 @@ def ConnectionRules(): return tuple(sorted(GetKernelStatus("connection_rules"))) -@check_stack def SetDefaults(model, params, val=None): """Set defaults for the given model or recording backend. @@ -129,7 +126,6 @@ def SetDefaults(model, params, val=None): nestkernel.llapi_set_defaults(model, params) -@check_stack def GetDefaults(model, keys=None, output=""): """Return defaults of the given model or recording backend. @@ -177,7 +173,6 @@ def GetDefaults(model, keys=None, output=""): return result -@check_stack def CopyModel(existing, new, params=None): """Create a new model by copying an existing one. diff --git a/pynest/nest/lib/hl_api_nodes.py b/pynest/nest/lib/hl_api_nodes.py index 8da5ed0bce..674d4aaffc 100644 --- a/pynest/nest/lib/hl_api_nodes.py +++ b/pynest/nest/lib/hl_api_nodes.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# _hl_api_nodes.py +# hl_api_nodes.py # # This file is part of NEST. # @@ -26,11 +26,13 @@ import warnings import nest -from .._ll_api import * -from .. import pynestkernel as kernel + from .. import nestkernel_api as nestkernel -from ._hl_api_helper import is_iterable, model_deprecation_warning -from ._hl_api_types import NodeCollection, Parameter +from ..ll_api import * +from .hl_api_exceptions import NESTErrors +from .hl_api_helper import is_iterable, model_deprecation_warning +from .hl_api_parallel_computing import NumProcesses, Rank +from .hl_api_types import NodeCollection, Parameter __all__ = [ "Create", @@ -63,13 +65,14 @@ def Create(model, n=1, params=None, positions=None): params : dict or list, optional Parameters for the new nodes. Can be any of the following: - - A dictionary with either single values or lists of size n. - The single values will be applied to all nodes, while the lists will be distributed across - the nodes. Both single values and lists can be given at the same time. - - A list with n dictionaries, one dictionary for each node. + - A dictionary with either single values or lists of size n. + The single values will be applied to all nodes, while the lists will be distributed across + the nodes. Both single values and lists can be given at the same time. + - A list with n dictionaries, one dictionary for each node. + Values may be :py:class:`.Parameter` objects. If omitted, the model's defaults are used. - positions: :py:class:`.spatial.grid` or :py:class:`.spatial.free` object, optional + positions: :py:class:`.grid` or :py:class:`.free` object, optional Object describing spatial positions of the nodes. If omitted, the nodes have no spatial attachment. Returns @@ -98,9 +101,15 @@ def Create(model, n=1, params=None, positions=None): # PYNEST-NG: can we support the usecase above by passing the dict into ll_create? 
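     # Both accepted forms of `params`, illustrated with assumed values:
     #     nest.Create("iaf_psc_alpha", 2, params={"I_e": [300.0, 400.0]})  # dict, lists spread over nodes
     #     nest.Create("iaf_psc_alpha", 2, params=[{"I_e": 300.0}, {"I_e": 400.0}])  # one dict per node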
if isinstance(params, dict) and params: # if params is a dict and not empty - iterable_or_parameter_in_params = any( - is_iterable(v) or isinstance(v, Parameter) for k, v in params.items() - ) + iterable_or_parameter_in_params = any(is_iterable(v) or isinstance(v, Parameter) for k, v in params.items()) + + if isinstance(params, (list, tuple)) and len(params) != n: + raise TypeError("list of params must have one dictionary per node") + + if params is not None and not ( + isinstance(params, dict) or (isinstance(params, (list, tuple)) and all(isinstance(e, dict) for e in params)) + ): + raise TypeError("params must be either a dict of parameters or a list or tuple of dicts") if positions is not None: # Explicitly retrieve lazy loaded spatial property from the module class. @@ -108,9 +117,7 @@ def Create(model, n=1, params=None, positions=None): spatial = getattr(nest.NestModule, "spatial") # We only accept positions as either a free object or a grid object. if not isinstance(positions, (spatial.free, spatial.grid)): - raise TypeError( - "`positions` must be either a nest.spatial.free or a nest.spatial.grid object" - ) + raise TypeError("`positions` must be either a nest.spatial.free or a nest.spatial.grid object") layer_specs = {"elements": model} layer_specs["edge_wrap"] = positions.edge_wrap if isinstance(positions, spatial.free): @@ -121,14 +128,12 @@ def Create(model, n=1, params=None, positions=None): else: # If positions is not a free object, it must be a grid object. if n > 1: - raise kernel.NESTError( - "Cannot specify number of nodes with grid positions" - ) + raise NESTErrors.NESTError("Cannot specify number of nodes with grid positions") layer_specs["shape"] = positions.shape if positions.center is not None: - layer_specs["center"] = positions.center + layer_specs["center"] = [float(v) for v in positions.center] if positions.extent is not None: - layer_specs["extent"] = positions.extent + layer_specs["extent"] = [float(v) for v in positions.extent] layer = nestkernel.llapi_create_spatial(layer_specs) layer.set(params if params else {}) @@ -136,7 +141,8 @@ def Create(model, n=1, params=None, positions=None): node_ids = nestkernel.llapi_create(model, n) - if isinstance(params, dict) and params: # if params is a dict and not empty + if (isinstance(params, dict) and params) or isinstance(params, (list, tuple)): + # if params is a dict and not empty or a list of dicts try: node_ids.set(params) except Exception: @@ -199,9 +205,7 @@ def GetLocalNodeCollection(nc): Object representing the local nodes of the given `NodeCollection` """ if not isinstance(nc, NodeCollection): - raise TypeError( - "GetLocalNodeCollection requires a NodeCollection in order to run" - ) + raise TypeError("GetLocalNodeCollection requires a NodeCollection in order to run") rank = Rank() num_procs = NumProcesses() diff --git a/pynest/nest/lib/hl_api_parallel_computing.py b/pynest/nest/lib/hl_api_parallel_computing.py index 608268292c..44d489043a 100644 --- a/pynest/nest/lib/hl_api_parallel_computing.py +++ b/pynest/nest/lib/hl_api_parallel_computing.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# _hl_api_parallel_computing.py +# hl_api_parallel_computing.py # # This file is part of NEST. # @@ -23,9 +23,8 @@ Functions for parallel computing """ -from .._ll_api import * -from .. import pynestkernel as kernel from .. import nestkernel_api as nestkernel +from ..ll_api import * __all__ = [ "NumProcesses", @@ -37,7 +36,6 @@ ] -@check_stack def Rank(): """Return the MPI rank of the local process. 
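A brief usage sketch for this module (illustrative only):

    import nest

    # Restrict console output to a single process in an MPI run
    if nest.Rank() == 0:
        print(f"Simulating on {nest.NumProcesses()} MPI processes")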
@@ -57,10 +55,9 @@ def Rank(): may complete but generate nonsensical results. """ - return nestkernel.llapi_get_rank() + return nestkernel.llapi_get_kernel_status()["mpi_rank"] -@check_stack def NumProcesses(): """Return the overall number of MPI processes. @@ -70,10 +67,9 @@ def NumProcesses(): Number of overall MPI processes """ - return nestkernel.llapi_get_num_mpi_processes() + return nestkernel.llapi_get_kernel_status()["num_processes"] -@check_stack def SetAcceptableLatency(port_name, latency): """Set the acceptable `latency` (in ms) for a MUSIC port. @@ -85,12 +81,13 @@ def SetAcceptableLatency(port_name, latency): Latency in ms """ - sps(kernel.SLILiteral(port_name)) - sps(latency) - sr("SetAcceptableLatency") + # PYNEST-NG + # sps(kernel.SLILiteral(port_name)) + # sps(latency) + # sr("SetAcceptableLatency") + pass -@check_stack def SetMaxBuffered(port_name, size): """Set the maximum buffer size for a MUSIC port. @@ -102,24 +99,27 @@ def SetMaxBuffered(port_name, size): Buffer size """ - sps(kernel.SLILiteral(port_name)) - sps(size) - sr("SetMaxBuffered") + # PYNEST-NG + # sps(kernel.SLILiteral(port_name)) + # sps(size) + # sr("SetMaxBuffered") + pass -@check_stack def SyncProcesses(): """Synchronize all MPI processes.""" - sr("SyncProcesses") + # PYNEST-NG + # sr("SyncProcesses") + pass -@check_stack def GetLocalVPs(): """Return iterable representing the VPs local to the MPI rank.""" # Compute local VPs as range based on round-robin logic in # VPManager::get_vp(). mpitest_get_local_vps ensures this is in # sync with the kernel. - n_vp = sli_func("GetKernelStatus /total_num_virtual_procs get") + + n_vp = nestkernel.llapi_get_kernel_status()["total_num_virtual_procs"] return range(Rank(), n_vp, NumProcesses()) diff --git a/pynest/nest/lib/hl_api_simulation.py b/pynest/nest/lib/hl_api_simulation.py index c75a4ce10e..b50bc6e798 100644 --- a/pynest/nest/lib/hl_api_simulation.py +++ b/pynest/nest/lib/hl_api_simulation.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# _hl_api_simulation.py +# hl_api_simulation.py # # This file is part of NEST. # @@ -23,15 +23,13 @@ Functions for simulation control """ -from contextlib import contextmanager import warnings +from contextlib import contextmanager -from .. import pynestkernel as kernel from .. import nestkernel_api as nestkernel - -from .._ll_api import * -from ._hl_api_helper import is_iterable -from ._hl_api_parallel_computing import Rank +from ..ll_api import * +from .hl_api_helper import is_iterable +from .hl_api_parallel_computing import Rank __all__ = [ "Cleanup", @@ -48,10 +46,11 @@ ] -@check_stack def Simulate(t): """Simulate the network for `t` milliseconds. + `Simulate(t)` runs `Prepare()`, `Run(t)`, and `Cleanup()` in this order. + Parameters ---------- t : float @@ -59,14 +58,13 @@ def Simulate(t): See Also -------- - RunManager + RunManager, Prepare, Run, Cleanup """ nestkernel.llapi_simulate(t) -@check_stack def Run(t): """Simulate the network for `t` milliseconds. @@ -79,9 +77,7 @@ def Run(t): ------ Call between `Prepare` and `Cleanup` calls, or within a - ``with RunManager`` clause. - - Simulate(t): t' = t/m; Prepare(); for _ in range(m): Run(t'); Cleanup() + ``with RunManager`` clause. `Run(t)` is called once by each call to `Simulate(t)`. `Prepare` must be called before `Run` to calibrate the system, and `Cleanup` must be called after `Run` to close files, cleanup handles, and @@ -105,28 +101,30 @@ def Run(t): nestkernel.llapi_run(t) -@check_stack def Prepare(): - """Calibrate the system before a `Run` call. 
Not needed for `Simulate`.
+    """Calibrate the system before a `Run` call.
+
+    `Prepare` is automatically called by `Simulate` and `RunManager`.
 
     See Also
     --------
-    Run, Cleanup
+    Run, Cleanup, Simulate, RunManager
 
     """
     nestkernel.llapi_prepare()
 
 
-@check_stack
 def Cleanup():
-    """Cleans up resources after a `Run` call. Not needed for `Simulate`.
+    """Cleans up resources after a `Run` call.
+
+    `Cleanup` is automatically called by `Simulate` and `RunManager`.
 
     Closes state for a series of runs, such as flushing and closing files.
     A `Prepare` is needed after a `Cleanup` before any more calls to `Run`.
 
     See Also
     --------
-    Run, Prepare
+    Run, Prepare, Simulate, RunManager
 
     """
     nestkernel.llapi_cleanup()
@@ -169,7 +167,6 @@ def RunManager():
         Cleanup()
 
 
-@check_stack
 def ResetKernel():
     """Reset the simulation kernel.
 
@@ -181,10 +178,12 @@ def ResetKernel():
     * all network nodes
     * all connections
     * all user-defined neuron and synapse models
+
     are deleted, and
 
     * time
     * random generators
+
     are reset. The only exception is that dynamically loaded modules are not
     unloaded. This may change in a future version of NEST.
 
@@ -192,7 +191,6 @@ def ResetKernel():
     nestkernel.llapi_reset_kernel()
 
 
-@check_stack
 def SetKernelStatus(params):
     """Set parameters for the simulation kernel.
 
@@ -217,7 +215,6 @@ def SetKernelStatus(params):
     # the module level, but have to have it on the function level.
     import nest  # noqa
 
-    # TODO-PYNEST-NG: Enable again when KernelAttribute works
     raise_errors = params.get("dict_miss_is_error", nest.dict_miss_is_error)
     valids = nest._kernel_attr_names
     readonly = nest._readonly_kernel_attrs
@@ -242,7 +239,6 @@ def SetKernelStatus(params):
     nestkernel.llapi_set_kernel_status(params)
 
 
-@check_stack
 def GetKernelStatus(keys=None):
     """Obtain parameters of the simulation kernel.
 
@@ -290,7 +286,6 @@ def GetKernelStatus(keys=None):
         raise TypeError("keys should be either a string or an iterable")
 
 
-@check_stack
 def Install(module_name):
     """Load a dynamically linked NEST module.
 
@@ -320,7 +315,6 @@ def Install(module_name):
 
     return sr("(%s) Install" % module_name)
 
 
-@check_stack
 def EnableStructuralPlasticity():
     """Enable structural plasticity for the network simulation
 
@@ -330,10 +324,9 @@ def EnableStructuralPlasticity():
 
     """
 
-    sr("EnableStructuralPlasticity")
+    nestkernel.llapi_enable_structural_plasticity()
 
 
-@check_stack
 def DisableStructuralPlasticity():
     """Disable structural plasticity for the network simulation
 
@@ -342,4 +335,4 @@ def DisableStructuralPlasticity():
 
     EnableStructuralPlasticity
     """
-    sr("DisableStructuralPlasticity")
+    nestkernel.llapi_disable_structural_plasticity()
diff --git a/pynest/nest/lib/hl_api_sonata.py b/pynest/nest/lib/hl_api_sonata.py
new file mode 100644
index 0000000000..f3757710a2
--- /dev/null
+++ b/pynest/nest/lib/hl_api_sonata.py
@@ -0,0 +1,666 @@
+# -*- coding: utf-8 -*-
+#
+# hl_api_sonata.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST.  If not, see <http://www.gnu.org/licenses/>.
+ +""" +Functions to build and simulate networks with the SONATA format +""" + + +import itertools +import json +import os +from pathlib import Path, PurePath + +import numpy as np + +from .. import nestkernel_api as nestkernel +from .hl_api_models import GetDefaults +from .hl_api_nodes import Create +from .hl_api_simulation import GetKernelStatus, SetKernelStatus, Simulate +from .hl_api_types import NodeCollection + +try: + import pandas as pd + + have_pandas = True +except ImportError: + have_pandas = False + +try: + import h5py + + have_h5py = True +except ImportError: + have_h5py = False + +have_hdf5 = GetKernelStatus("build_info")["have_hdf5"] + +__all__ = ["SonataNetwork"] + + +class SonataNetwork: + """Class for building and simulating networks represented by the SONATA format. + + ``SonataNetwork`` provides native NEST support for building and simulating + network models represented by the SONATA format. In the SONATA format, + information about nodes, edges (synapses) and their respective properties + are stored in the table-based file formats HDF5 and CSV. Model metadata, + such as the path relation between files on disk and simulation parameters, + are stored in JSON configuration files. See the :ref:`nest_sonata` for details + on the NEST support of the SONATA format. + + The constructor takes the JSON configuration file specifying the paths to + the HDF5 and CSV files describing the network. In case simulation + parameters are stored in a separate JSON configuration file, the + constructor also has the option to pass a second configuration file. + + Parameters + ---------- + config : [str | pathlib.Path | pathlib.PurePath] + String or pathlib object describing the path to the JSON + configuration file. + sim_config : [str | pathlib.Path | pathlib.PurePath], optional + String or pathlib object describing the path to a JSON configuration + file containing simulation parameters. This is only needed if simulation + parameters are given in a separate configuration file. + + Example + ------- + :: + + import nest + + nest.ResetKernel() + + # Instantiate SonataNetwork + sonata_net = nest.SonataNetwork("path/to/config.json") + + # Create and connect nodes + node_collections = sonata_net.BuildNetwork() + + # Connect spike recorder to a population + s_rec = nest.Create("spike_recorder") + nest.Connect(node_collections["name_of_population_to_record"], s_rec) + + # Simulate the network + sonata_net.Simulate() + """ + + def __init__(self, config, sim_config=None): + if not have_hdf5: + msg = "SonataNetwork unavailable because NEST was compiled without HDF5 support" + raise nestkernel.NESTError(msg) + if not have_h5py: + msg = "SonataNetwork unavailable because h5py is not installed or could not be imported" + raise nestkernel.NESTError(msg) + if not have_pandas: + msg = "SonataNetwork unavailable because pandas is not installed or could not be imported" + raise nestkernel.NESTError(msg) + + self._node_collections = {} + self._edges_maps = [] + self._hyperslab_size_default = 2**20 + + self._are_nodes_created = False + self._is_network_built = False + + self._conf = self._parse_config(config) + if sim_config is not None: + self._conf.update(self._parse_config(sim_config)) + + if self._conf["target_simulator"] != "NEST": + msg = "'target_simulator' in configuration file must be 'NEST'." 
+ raise ValueError(msg) + + if "dt" not in self._conf["run"]: + msg = "Time resolution 'dt' must be specified in configuration file" + raise ValueError(msg) + + SetKernelStatus({"resolution": self._conf["run"]["dt"]}) + + def _parse_config(self, config): + """Parse JSON configuration file. + + Parse JSON config file and convert to a dictionary containing + absolute paths and simulation parameters. + + Parameters + ---------- + config : [str | pathlib.Path | pathlib.PurePath] + String or pathlib object describing the path to the JSON + configuration file. + + Returns + ------- + dict + SONATA config as dictionary + """ + + if not isinstance(config, (str, PurePath, Path)): + msg = "Path to JSON configuration file must be passed as str, pathlib.PurePath or pathlib.Path" + raise TypeError(msg) + + # Get absolute path + conf_path = Path(config).resolve(strict=True) + base_path = conf_path.parent + + with open(conf_path) as fp: + conf = json.load(fp) + + # Replace path variables (e.g. $MY_DIR) with absolute paths in manifest + for k, v in conf["manifest"].copy().items(): + if "$BASE_DIR" in v: + v = v.replace("$BASE_DIR", ".") + conf["manifest"].update({k: base_path.joinpath(v).as_posix()}) + + if k.startswith("$"): + conf["manifest"][k[1:]] = conf["manifest"].pop(k) + + def recursive_substitutions(config_obj): + # Recursive substitutions of path variables with entries from manifest + if isinstance(config_obj, dict): + return {k: recursive_substitutions(v) for k, v in config_obj.items()} + elif isinstance(config_obj, list): + return [recursive_substitutions(e) for e in config_obj] + elif isinstance(config_obj, str) and config_obj.startswith("$"): + for dir, path in conf["manifest"].items(): + config_obj = config_obj.replace(dir, path) + return config_obj[1:] + return config_obj + + conf.update(recursive_substitutions(conf)) + + return conf + + def Create(self): + """Create the SONATA network nodes. + + Creates the network nodes. In the SONATA format, node populations are + serialized in node HDF5 files. Each node in a population has a node + type. Each node population has a single associated node types CSV file + that assigns properties to all nodes with a given node type. + + Please note that it is assumed that all relevant node properties are + stored in the node types CSV file. For neuron nodes, the relevant + properties are model type, model template and reference to a JSON + file describing the parametrization. + + Returns + ------- + node_collections : dict + A dictionary containing the created :py:class:`.NodeCollection` + for each population. The population names are keys. + """ + + # Iterate node config files + for nodes_conf in self._conf["networks"]["nodes"]: + csv_fn = nodes_conf["node_types_file"] + nodes_df = pd.read_csv(csv_fn, sep=r"\s+") + + # Require only one model type per CSV file + model_types_arr = nodes_df["model_type"].to_numpy() + is_one_model_type = (model_types_arr[0] == model_types_arr).all() + + if not is_one_model_type: + msg = f"Only one model type per node types CSV file is supported. {csv_fn} contains more than one." + raise ValueError(msg) + + model_type = model_types_arr[0] + + if model_type in ["point_neuron", "point_process"]: + self._create_neurons(nodes_conf, nodes_df, csv_fn) + elif model_type == "virtual": + self._create_spike_train_injectors(nodes_conf) + else: + msg = f"Model type '{model_type}' in {csv_fn} is not supported by NEST." 
+ raise ValueError(msg) + + self._are_nodes_created = True + + return self._node_collections + + def _create_neurons(self, nodes_conf, nodes_df, csv_fn): + """Create neuron nodes. + + Parameters + ---------- + nodes_conf : dict + Config as dictionary specifying filenames + nodes_df : pandas.DataFrame + Associated node CSV table as dataframe + csv_fn : str + Name of current CSV file. Used for more informative error messages. + """ + + node_types_map = self._create_node_type_parameter_map(nodes_df, csv_fn) + + models_arr = nodes_df["model_template"].to_numpy() + is_one_model = (models_arr[0] == models_arr).all() + one_model_name = models_arr[0] if is_one_model else None + + with h5py.File(nodes_conf["nodes_file"], "r") as nodes_h5f: + # Iterate node populations in current node.h5 file + for pop_name in nodes_h5f["nodes"]: + node_type_id_dset = nodes_h5f["nodes"][pop_name]["node_type_id"][:] + + if is_one_model: + nest_nodes = Create(one_model_name, n=node_type_id_dset.size) + node_type_ids, inv_ind = np.unique(node_type_id_dset, return_inverse=True) + + # Extract node parameters + for i, node_type_id in enumerate(node_type_ids): + params_path = PurePath( + self._conf["components"]["point_neuron_models_dir"], + node_types_map[node_type_id]["dynamics_params"], + ) + + with open(params_path) as fp: + params = json.load(fp) + + nest_nodes[inv_ind == i].set(params) + else: + # More than one NEST neuron model in CSV file + + # TODO: Utilizing np.unique(node_type_id_dset, return_=...) + # with the different return options might be more efficient + + nest_nodes = NodeCollection() + for k, g in itertools.groupby(node_type_id_dset): + # k is a node_type_id key + # g is an itertools group object + # len(list(g)) gives the number of consecutive occurrences of the current k + model = node_types_map[k]["model_template"] + n_nrns = len(list(g)) + params_path = PurePath( + self._conf["components"]["point_neuron_models_dir"], + node_types_map[k]["dynamics_params"], + ) + with open(params_path) as fp: + params = json.load(fp) + + nest_nodes += Create(model, n=n_nrns, params=params) + + self._node_collections[pop_name] = nest_nodes + + def _create_spike_train_injectors(self, nodes_conf): + """Create spike train injector nodes. + + Parameters + ---------- + nodes_conf : dict + Config as dictionary specifying filenames + """ + + with h5py.File(nodes_conf["nodes_file"], "r") as nodes_h5f: + for pop_name in nodes_h5f["nodes"]: + node_type_id_dset = nodes_h5f["nodes"][pop_name]["node_type_id"] + n_nodes = node_type_id_dset.size + + input_file = None + for inputs_dict in self._conf["inputs"].values(): + if inputs_dict["node_set"] == pop_name: + input_file = inputs_dict["input_file"] + break # Break once we found the matching population + + if input_file is None: + msg = f"Could not find an input file for population {pop_name} in config file." 
+ raise ValueError(msg) + + with h5py.File(input_file, "r") as input_h5f: + # Deduce the HDF5 file structure + all_groups = all([isinstance(g, h5py.Group) for g in input_h5f["spikes"].values()]) + any_groups = any([isinstance(g, h5py.Group) for g in input_h5f["spikes"].values()]) + if (all_groups or any_groups) and not (all_groups and any_groups): + msg = ( + "Unsupported HDF5 structure; groups and " + "datasets cannot be on the same hierarchical " + f"level in input spikes file {input_file}" + ) + raise ValueError(msg) + + if all_groups: + if pop_name in input_h5f["spikes"].keys(): + spikes_grp = input_h5f["spikes"][pop_name] + else: + msg = f"Did not find a matching HDF5 group name for population {pop_name} in {input_file}" + raise ValueError(msg) + else: + spikes_grp = input_h5f["spikes"] + + if "gids" in spikes_grp: + node_ids = spikes_grp["gids"][:] + elif "node_ids" in spikes_grp: + node_ids = spikes_grp["node_ids"][:] + else: + msg = f"No dataset called 'gids' or 'node_ids' in {input_file}" + raise ValueError(msg) + + timestamps = spikes_grp["timestamps"][:] + + # Map node id's to spike times + # TODO: Can this be done in a more efficient way? + spikes_map = {node_id: timestamps[node_ids == node_id] for node_id in range(n_nodes)} + params_lst = [ + {"spike_times": spikes_map[node_id], "allow_offgrid_times": True} for node_id in range(n_nodes) + ] + + # Create and store NC + nest_nodes = Create("spike_train_injector", n=n_nodes, params=params_lst) + self._node_collections[pop_name] = nest_nodes + + def _create_node_type_parameter_map(self, nodes_df, csv_fn): + """Create map between node type id and node properties. + + For neuron models, each node type id in the node types CSV file has: + * A model template which describes the name of the neuron model + * A reference to a JSON file describing the neuron's parametrization + + This function creates a map of the above node properties with the + node type id as key. + + Parameters + ---------- + nodes_df : pandas.DataFrame + Node type CSV table as dataframe. + csv_fn : str + Name of current CSV file. Used for more informative error messages. + + Returns + ------- + dict : + Map of node properties for the different node type ids. + """ + + if "model_template" not in nodes_df.columns: + msg = f"Missing the required 'model_template' header specifying NEST neuron models in {csv_fn}." + raise ValueError(msg) + + if "dynamics_params" not in nodes_df.columns: + msg = ( + "Missing the required 'dynamics_params' header specifying " + f".json files with model parameters in {csv_fn}" + ) + raise ValueError(msg) + + nodes_df["model_template"] = nodes_df["model_template"].str.replace("nest:", "") + + req_cols = ["model_template", "dynamics_params"] + node_types_map = nodes_df.set_index("node_type_id")[req_cols].to_dict(orient="index") + + return node_types_map + + def Connect(self, hdf5_hyperslab_size=None): + """Connect the SONATA network nodes. + + The connections are created by first parsing the edge (synapse) CSV + files to create a map of synaptic properties on the Python level. This + is then sent to the NEST kernel together with the edge HDF5 files to + create the connections. + + For large networks, the edge HDF5 files might not fit into memory in + their entirety. In the NEST kernel, the edge HDF5 datasets are therefore + read sequentially as blocks of contiguous hyperslabs. The hyperslab size + is modifiable so that the user is able to achieve a balance between + the number of read operations and memory overhead. 
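+
+        For example, reading in smaller blocks can be requested with
+        ``sonata_net.Connect(hdf5_hyperslab_size=2**18)`` (the value is
+        illustrative, not a recommendation).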
+
+        Parameters
+        ----------
+        hdf5_hyperslab_size : int, optional
+            Size of the hyperslab to read in one read operation. The hyperslab
+            size is applied to all HDF5 datasets that need to be read in order
+            to create the connections. Default: ``2**20``.
+        """
+
+        if not self._are_nodes_created:
+            msg = "The SONATA network nodes must be created before any connections can be made"
+            raise nestkernel.NESTError(msg)
+
+        if hdf5_hyperslab_size is None:
+            hdf5_hyperslab_size = self._hyperslab_size_default
+
+        self._verify_hyperslab_size(hdf5_hyperslab_size)
+
+        graph_specs = self._create_graph_specs()
+
+        # Check whether HDF5 files exist and are not blocked.
+        for d in graph_specs["edges"]:
+            try:
+                f = h5py.File(d["edges_file"], "r")
+                f.close()
+            except BlockingIOError as err:
+                raise BlockingIOError(f"{err.strerror} for {os.path.realpath(d['edges_file'])}") from None
+
+        sps(graph_specs)
+        sps(hdf5_hyperslab_size)
+        sr("ConnectSonata")
+
+        self._is_network_built = True
+
+    def _verify_hyperslab_size(self, hyperslab_size):
+        """Check if provided hyperslab size is valid."""
+
+        if not isinstance(hyperslab_size, int):
+            raise TypeError("hdf5_hyperslab_size must be passed as int")
+        if hyperslab_size <= 0:
+            raise ValueError("hdf5_hyperslab_size must be strictly positive")
+
+    def _create_graph_specs(self):
+        """Create graph specifications dictionary.
+
+        The graph specifications (`graph_specs`) dictionary is passed to
+        the kernel where the connections are created. `graph_specs` has the
+        following structure:
+
+        {
+            "nodes":
+                {
+                    "<population_name>": NodeCollection,
+                    "<population_name>": NodeCollection,
+                    ...
+                },
+            "edges":
+                [
+                    {"edges_file": '<edges_file_name>',
+                     "syn_specs": {"<edge_type_id>": syn_spec,
+                                   "<edge_type_id>": syn_spec,
+                                   ...
+                                   }
+                    },
+                    {"edges_file": '<edges_file_name>',
+                     "syn_specs": {"<edge_type_id>": syn_spec,
+                                   "<edge_type_id>": syn_spec,
+                                   ...
+                                   }
+                    },
+                    ...
+                ]
+        }
+
+        Returns
+        -------
+        dict :
+            Map of SONATA graph specifications.
+        """
+
+        self._create_edges_maps()
+        graph_specs = {"nodes": self._node_collections, "edges": self._edges_maps}
+        return graph_specs
+
+    def _create_edges_maps(self):
+        """Create a collection of maps of edge properties.
+
+        Creates a map between edge type id and edge (synapse) properties for
+        each edge CSV file. The associated edge HDF5 filename is included in
+        the map as well.
+        """
+
+        # Iterate edge config files
+        for edges_conf in self._conf["networks"]["edges"]:
+            edges_map = {}
+            edges_csv_fn = edges_conf["edge_types_file"]
+            edges_df = pd.read_csv(edges_csv_fn, sep=r"\s+")
+
+            if "model_template" not in edges_df.columns:
+                msg = f"Missing the required 'model_template' header specifying NEST synapse models in {edges_csv_fn}."
+                raise ValueError(msg)
+
+            # Rename column labels to names used by NEST. Note that rename
+            # doesn't throw an error for extra labels (we want this behavior)
+            edges_df.rename(
+                columns={"model_template": "synapse_model", "syn_weight": "weight"},
+                inplace=True,
+            )
+
+            edges_df_cols = set(edges_df.columns)
+
+            # If 'dynamics_params' is specified, additional synapse
+            # parameters may be given in a .json file
+            have_dynamics = "dynamics_params" in edges_df.columns
+
+            # Extract synapse models in the edge CSV file and check if
+            # only one model is present; we can then use a more efficient
+            # procedure for extracting the syn_specs.
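+            # For illustration, a resulting entry may look like (values assumed):
+            #     syn_specs[7] = {"synapse_model": "static_synapse", "weight": 2.5, "delay": 1.5}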
+            models_arr = edges_df["synapse_model"].to_numpy()
+            is_one_model = (models_arr[0] == models_arr).all()
+
+            if is_one_model:
+                # Only one model in the edge CSV file
+
+                synapse_model = models_arr[0]
+                # Find set of settable parameters for synapse model
+                settable_params = set([*GetDefaults(synapse_model)])
+                # Parameters to extract (elements common to both sets)
+                extract_cols = list(settable_params & edges_df_cols)
+                if have_dynamics:
+                    extract_cols.append("dynamics_params")
+
+                # Extract syn_spec for each edge type
+                syn_specs = edges_df.set_index("edge_type_id")[extract_cols].to_dict(orient="index")
+
+                if have_dynamics:
+                    # Include parameters from JSON file in the syn_spec
+                    for edge_type_id, syn_spec in syn_specs.copy().items():
+                        params_path = PurePath(
+                            self._conf["components"]["synaptic_models_dir"],
+                            syn_spec["dynamics_params"],
+                        )
+                        with open(params_path) as fp:
+                            params = json.load(fp)
+
+                        syn_specs[edge_type_id].update(params)
+                        syn_specs[edge_type_id].pop("dynamics_params")
+            else:
+                # More than one synapse model in CSV file; in this case we
+                # must iterate each row in the CSV table. For each row,
+                # we extract the syn_spec associated with the specified model
+
+                syn_specs = {}
+                idx_map = {k: i for i, k in enumerate(list(edges_df), start=1)}
+
+                for row in edges_df.itertuples(name=None):
+                    # Set of settable parameters
+                    settable_params = set([*GetDefaults(row[idx_map["synapse_model"]])])
+                    # Parameters to extract (elements common to both sets)
+                    extract_cols = list(settable_params & edges_df_cols)
+                    syn_spec = {k: row[idx_map[k]] for k in extract_cols}
+
+                    if have_dynamics:
+                        # Include parameters from JSON file in the map
+                        params_path = PurePath(
+                            self._conf["components"]["synaptic_models_dir"],
+                            row[idx_map["dynamics_params"]],
+                        )
+
+                        with open(params_path) as fp:
+                            params = json.load(fp)
+
+                        syn_spec.update(params)
+
+                    syn_specs[row[idx_map["edge_type_id"]]] = syn_spec
+
+            # Create edges map
+            edges_map["syn_specs"] = syn_specs
+            edges_map["edges_file"] = edges_conf["edges_file"]
+            self._edges_maps.append(edges_map)
+
+    def BuildNetwork(self, hdf5_hyperslab_size=None):
+        """Build SONATA network.
+
+        Convenience function for building the SONATA network. The function
+        first calls the member function :py:func:`Create()` to create the
+        network nodes and then the member function :py:func:`Connect()`
+        to create their connections.
+
+        For more details, see :py:func:`Create()` and :py:func:`Connect()`.
+
+        Parameters
+        ----------
+        hdf5_hyperslab_size : int, optional
+            Size of hyperslab that is read into memory in one read operation.
+            Applies to all HDF5 datasets relevant for creating the connections.
+            Default: ``2**20``.
+
+        Returns
+        -------
+        node_collections : dict
+            A dictionary containing the created :py:class:`.NodeCollection`
+            for each population. The population names are keys.
+        """
+
+        if hdf5_hyperslab_size is not None:
+            # Chunk size is verified in Connect, but we also verify here
+            # to save computational resources in case of wrong input
+            self._verify_hyperslab_size(hdf5_hyperslab_size)
+
+        node_collections = self.Create()
+        self.Connect(hdf5_hyperslab_size=hdf5_hyperslab_size)
+
+        return node_collections
+
+    def Simulate(self):
+        """Simulate the SONATA network.
+
+        The simulation time and resolution are expected to be provided in the
+        JSON configuration file.
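+
+        For example, with ``"run": {"tstop": 1000.0, "dt": 0.1}`` in the
+        configuration (illustrative values), the network is simulated for
+        1000 ms at a resolution of 0.1 ms.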
+ """ + + # Verify that network is built + if not self._is_network_built: + msg = "The SONATA network must be built before a simulation can be done" + raise nestkernel.NESTError(msg) + + if "tstop" in self._conf["run"]: + T_sim = self._conf["run"]["tstop"] + elif "duration" in self._conf["run"]: + T_sim = self._conf["run"]["duration"] + else: + msg = "Simulation time 'tstop' or 'duration' must be specified in configuration file" + raise ValueError(msg) + + Simulate(T_sim) + + @property + def node_collections(self): + return self._node_collections + + @property + def config(self): + return self._conf diff --git a/pynest/nest/lib/hl_api_spatial.py b/pynest/nest/lib/hl_api_spatial.py index 815efbe99f..4cc224a006 100644 --- a/pynest/nest/lib/hl_api_spatial.py +++ b/pynest/nest/lib/hl_api_spatial.py @@ -23,42 +23,43 @@ Functions relating to spatial properties of nodes """ +import os import numpy as np -from .. import pynestkernel as kernel from .. import nestkernel_api as nestkernel -from ._hl_api_helper import is_iterable -from ._hl_api_connections import GetConnections -from ._hl_api_parallel_computing import NumProcesses, Rank -from ._hl_api_types import NodeCollection +from .hl_api_connections import GetConnections +from .hl_api_helper import is_iterable, stringify_path +from .hl_api_parallel_computing import NumProcesses, Rank +from .hl_api_types import NodeCollection try: import matplotlib as mpl - import matplotlib.path as mpath import matplotlib.patches as mpatches + import matplotlib.path as mpath + HAVE_MPL = True except ImportError: HAVE_MPL = False __all__ = [ - 'CreateMask', - 'Displacement', - 'Distance', - 'DumpLayerConnections', - 'DumpLayerNodes', - 'FindCenterElement', - 'FindNearestElement', - 'GetPosition', - 'GetTargetNodes', - 'GetSourceNodes', - 'GetTargetPositions', - 'GetSourcePositions', - 'PlotLayer', - 'PlotProbabilityParameter', - 'PlotTargets', - 'PlotSources', - 'SelectNodesByMask', + "CreateMask", + "Displacement", + "Distance", + "DumpLayerConnections", + "DumpLayerNodes", + "FindCenterElement", + "FindNearestElement", + "GetPosition", + "GetTargetNodes", + "GetSourceNodes", + "GetTargetPositions", + "GetSourcePositions", + "PlotLayer", + "PlotProbabilityParameter", + "PlotTargets", + "PlotSources", + "SelectNodesByMask", ] @@ -139,7 +140,7 @@ def CreateMask(masktype, specs, anchor=None): {'lower_left' : [float, float, float], 'upper_right' : [float, float, float], 'azimuth_angle: float # default: 0.0, - 'polar_angle : float # defualt: 0.0} + 'polar_angle : float # default: 0.0} #or 'spherical' : {'radius' : float} @@ -162,7 +163,7 @@ def CreateMask(masktype, specs, anchor=None): By default the top-left corner of a grid mask, i.e., the grid mask element with grid index [0, 0], is aligned with the driver node. 
It can be changed by means of the 'anchor' parameter: - :: + :: 'anchor' : {'row' : float, @@ -188,10 +189,9 @@ def CreateMask(masktype, specs, anchor=None): nest.Connect(l, l, conndict) """ if anchor is None: - return sli_func('CreateMask', {masktype: specs}) + return nestkernel.llapi_create_mask({masktype: specs}) else: - return sli_func('CreateMask', - {masktype: specs, 'anchor': anchor}) + return nestkernel.llapi_create_mask({masktype: specs, "anchor": anchor}) def GetPosition(nodes): @@ -306,13 +306,12 @@ def Displacement(from_arg, to_arg): raise TypeError("to_arg must be a NodeCollection") if isinstance(from_arg, np.ndarray): - from_arg = (from_arg, ) + from_arg = (from_arg,) - if (len(from_arg) > 1 and len(to_arg) > 1 and not - len(from_arg) == len(to_arg)): + if len(from_arg) > 1 and len(to_arg) > 1 and not len(from_arg) == len(to_arg): raise ValueError("to_arg and from_arg must have same size unless one have size 1.") - return sli_func('Displacement', from_arg, to_arg) + return nestkernel.llapi_displacement(from_arg, to_arg) def Distance(from_arg, to_arg): @@ -374,13 +373,12 @@ def Distance(from_arg, to_arg): raise TypeError("to_arg must be a NodeCollection") if isinstance(from_arg, np.ndarray): - from_arg = (from_arg, ) + from_arg = (from_arg,) - if (len(from_arg) > 1 and len(to_arg) > 1 and not - len(from_arg) == len(to_arg)): + if len(from_arg) > 1 and len(to_arg) > 1 and not len(from_arg) == len(to_arg): raise ValueError("to_arg and from_arg must have same size unless one have size 1.") - return sli_func('Distance', from_arg, to_arg) + return nestkernel.llapi_spatial_distance(from_arg, to_arg) def FindNearestElement(layer, locations, find_all=False): @@ -443,7 +441,7 @@ def FindNearestElement(layer, locations, find_all=False): # Ensure locations is sequence, keeps code below simpler if not is_iterable(locations[0]): - locations = (locations, ) + locations = (locations,) result = [] @@ -479,11 +477,11 @@ def _rank_specific_filename(basename): np = NumProcesses() np_digs = len(str(np - 1)) # for pretty formatting rk = Rank() - dot = basename.find('.') + dot = basename.find(".") if dot < 0: - return '%s-%0*d' % (basename, np_digs, rk) + return "%s-%0*d" % (basename, np_digs, rk) else: - return '%s-%0*d%s' % (basename[:dot], np_digs, rk, basename[dot:]) + return "%s-%0*d%s" % (basename[:dot], np_digs, rk, basename[dot:]) def DumpLayerNodes(layer, outname): @@ -492,7 +490,8 @@ def DumpLayerNodes(layer, outname): Write `node ID` and position data to `outname` file. 
For each node in `layer`, a line with the following information is written: - :: + + :: node ID x-position y-position [z-position] @@ -532,13 +531,12 @@ def DumpLayerNodes(layer, outname): nest.DumpLayerNodes(s_nodes, 'positions.txt') """ + if not isinstance(layer, NodeCollection): raise TypeError("layer must be a NodeCollection") - sli_func(""" - (w) file exch DumpLayerNodes close - """, - layer, _rank_specific_filename(outname)) + outname = stringify_path(outname) + nestkernel.llapi_dump_layer_nodes(layer._datum, _rank_specific_filename(outname)) def DumpLayerConnections(source_layer, target_layer, synapse_model, outname): @@ -598,21 +596,17 @@ def DumpLayerConnections(source_layer, target_layer, synapse_model, outname): # write connectivity information to file nest.DumpLayerConnections(s_nodes, s_nodes, 'static_synapse', 'conns.txt') """ + if not isinstance(source_layer, NodeCollection): raise TypeError("source_layer must be a NodeCollection") + if not isinstance(target_layer, NodeCollection): raise TypeError("target_layer must be a NodeCollection") - sli_func(""" - /oname Set - cvlit /synmod Set - /lyr_target Set - /lyr_source Set - oname (w) file lyr_source lyr_target synmod - DumpLayerConnections close - """, - source_layer, target_layer, synapse_model, - _rank_specific_filename(outname)) + outname = stringify_path(outname) + nestkernel.llapi_dump_layer_connections( + source_layer._datum, target_layer._datum, synapse_model, _rank_specific_filename(outname) + ) def FindCenterElement(layer): @@ -651,9 +645,9 @@ def FindCenterElement(layer): if not isinstance(layer, NodeCollection): raise TypeError("layer must be a NodeCollection") - nearest_to_center = FindNearestElement(layer, layer.spatial['center'])[0] - index = layer.index(nearest_to_center.get('global_id')) - return layer[index:index+1] + nearest_to_center = FindNearestElement(layer, layer.spatial["center"])[0] + index = layer.index(nearest_to_center.get("global_id")) + return layer[index : index + 1] def GetTargetNodes(sources, tgt_layer, syn_model=None): @@ -864,12 +858,11 @@ def GetTargetPositions(sources, tgt_layer, syn_model=None): # Find positions to all nodes in target layer pos_all_tgts = GetPosition(tgt_layer) - first_tgt_node_id = tgt_layer[0].get('global_id') + first_tgt_node_id = tgt_layer[0].get("global_id") - connections = GetConnections(sources, tgt_layer, - synapse_model=syn_model) - srcs = connections.get('source') - tgts = connections.get('target') + connections = GetConnections(sources, tgt_layer, synapse_model=syn_model) + srcs = connections.get("source") + tgts = connections.get("target") if isinstance(srcs, int): srcs = [srcs] if isinstance(tgts, int): @@ -946,12 +939,11 @@ def GetSourcePositions(src_layer, targets, syn_model=None): # Find positions to all nodes in source layer pos_all_srcs = GetPosition(src_layer) - first_src_node_id = src_layer[0].get('global_id') + first_src_node_id = src_layer[0].get("global_id") - connections = GetConnections(src_layer, targets, - synapse_model=syn_model) - srcs = connections.get('source') - tgts = connections.get('target') + connections = GetConnections(src_layer, targets, synapse_model=syn_model) + srcs = connections.get("source") + tgts = connections.get("target") if isinstance(srcs, int): srcs = [srcs] if isinstance(tgts, int): @@ -997,11 +989,7 @@ def SelectNodesByMask(layer, anchor, mask_obj): mask_datum = mask_obj._datum - node_id_list = sli_func('SelectNodesByMask', - layer, anchor, mask_datum) - - # When creating a NodeCollection, the input list of nodes IDs 
must be sorted. - return NodeCollection(sorted(node_id_list)) + return nestkernel.llapi_select_nodes_by_mask(layer._datum, anchor, mask_datum) def _draw_extent(ax, xctr, yctr, xext, yext): @@ -1014,30 +1002,33 @@ def _draw_extent(ax, xctr, yctr, xext, yext): # thin gray line indicating extent llx, lly = xctr - xext / 2.0, yctr - yext / 2.0 urx, ury = llx + xext, lly + yext - ax.add_patch( - plt.Rectangle((llx, lly), xext, yext, fc='none', ec='0.5', lw=1, - zorder=1)) + ax.add_patch(plt.Rectangle((llx, lly), xext, yext, fc="none", ec="0.5", lw=1, zorder=1)) # set limits slightly outside extent - ax.set(aspect='equal', - xlim=(llx - 0.05 * xext, urx + 0.05 * xext), - ylim=(lly - 0.05 * yext, ury + 0.05 * yext), - xticks=tuple(), yticks=tuple()) + ax.set( + aspect="equal", + xlim=(llx - 0.05 * xext, urx + 0.05 * xext), + ylim=(lly - 0.05 * yext, ury + 0.05 * yext), + xticks=tuple(), + yticks=tuple(), + ) def _shifted_positions(pos, ext): """Get shifted positions corresponding to boundary conditions.""" - return [[pos[0] + ext[0], pos[1]], - [pos[0] - ext[0], pos[1]], - [pos[0], pos[1] + ext[1]], - [pos[0], pos[1] - ext[1]], - [pos[0] + ext[0], pos[1] - ext[1]], - [pos[0] - ext[0], pos[1] + ext[1]], - [pos[0] + ext[0], pos[1] + ext[1]], - [pos[0] - ext[0], pos[1] - ext[1]]] - - -def PlotLayer(layer, fig=None, nodecolor='b', nodesize=20): + return [ + [pos[0] + ext[0], pos[1]], + [pos[0] - ext[0], pos[1]], + [pos[0], pos[1] + ext[1]], + [pos[0], pos[1] - ext[1]], + [pos[0] + ext[0], pos[1] - ext[1]], + [pos[0] - ext[0], pos[1] + ext[1]], + [pos[0] + ext[0], pos[1] + ext[1]], + [pos[0] - ext[0], pos[1] - ext[1]], + ] + + +def PlotLayer(layer, fig=None, nodecolor="b", nodesize=20): """ Plot all nodes in a `layer`. @@ -1088,20 +1079,20 @@ def PlotLayer(layer, fig=None, nodecolor='b', nodesize=20): import matplotlib.pyplot as plt if not HAVE_MPL: - raise ImportError('Matplotlib could not be imported') + raise ImportError("Matplotlib could not be imported") if not isinstance(layer, NodeCollection): - raise TypeError('layer must be a NodeCollection.') + raise TypeError("layer must be a NodeCollection.") # get layer extent - ext = layer.spatial['extent'] + ext = layer.spatial["extent"] if len(ext) == 2: # 2D layer # get layer extent and center, x and y xext, yext = ext - xctr, yctr = layer.spatial['center'] + xctr, yctr = layer.spatial["center"] # extract position information, transpose to list of x and y pos if len(layer) == 1: @@ -1120,9 +1111,6 @@ def PlotLayer(layer, fig=None, nodecolor='b', nodesize=20): _draw_extent(ax, xctr, yctr, xext, yext) elif len(ext) == 3: - # 3D layer - from mpl_toolkits.mplot3d import Axes3D - # extract position information, transpose to list of x,y,z pos if len(layer) == 1: # handle case of single node @@ -1132,7 +1120,7 @@ def PlotLayer(layer, fig=None, nodecolor='b', nodesize=20): if fig is None: fig = plt.figure() - ax = fig.add_subplot(111, projection='3d') + ax = fig.add_subplot(111, projection="3d") else: ax = fig.gca() @@ -1145,10 +1133,20 @@ def PlotLayer(layer, fig=None, nodecolor='b', nodesize=20): return fig -def PlotTargets(src_nrn, tgt_layer, syn_type=None, fig=None, - mask=None, probability_parameter=None, - src_color='red', src_size=50, tgt_color='blue', tgt_size=20, - mask_color='yellow', probability_cmap='Greens'): +def PlotTargets( + src_nrn, + tgt_layer, + syn_type=None, + fig=None, + mask=None, + probability_parameter=None, + src_color="red", + src_size=50, + tgt_color="blue", + tgt_size=20, + mask_color="yellow", + probability_cmap="Greens", +): 
""" Plot all targets of source neuron `src_nrn` in a target layer `tgt_layer`. @@ -1235,14 +1233,14 @@ def PlotTargets(src_nrn, tgt_layer, syn_type=None, fig=None, srcpos = GetPosition(src_nrn) # get layer extent - ext = tgt_layer.spatial['extent'] + ext = tgt_layer.spatial["extent"] if len(ext) == 2: # 2D layer # get layer extent and center, x and y xext, yext = ext - xctr, yctr = tgt_layer.spatial['center'] + xctr, yctr = tgt_layer.spatial["center"] if fig is None: fig = plt.figure() @@ -1260,18 +1258,22 @@ def PlotTargets(src_nrn, tgt_layer, syn_type=None, fig=None, if mask is not None or probability_parameter is not None: edges = [xctr - xext, xctr + xext, yctr - yext, yctr + yext] - PlotProbabilityParameter(src_nrn, probability_parameter, mask=mask, edges=edges, ax=ax, - prob_cmap=probability_cmap, mask_color=mask_color) + PlotProbabilityParameter( + src_nrn, + probability_parameter, + mask=mask, + edges=edges, + ax=ax, + prob_cmap=probability_cmap, + mask_color=mask_color, + ) _draw_extent(ax, xctr, yctr, xext, yext) else: - # 3D layer - from mpl_toolkits.mplot3d import Axes3D - if fig is None: fig = plt.figure() - ax = fig.add_subplot(111, projection='3d') + ax = fig.add_subplot(111, projection="3d") else: ax = fig.gca() @@ -1288,10 +1290,20 @@ def PlotTargets(src_nrn, tgt_layer, syn_type=None, fig=None, return fig -def PlotSources(src_layer, tgt_nrn, syn_type=None, fig=None, - mask=None, probability_parameter=None, - tgt_color='red', tgt_size=50, src_color='blue', src_size=20, - mask_color='yellow', probability_cmap='Greens'): +def PlotSources( + src_layer, + tgt_nrn, + syn_type=None, + fig=None, + mask=None, + probability_parameter=None, + tgt_color="red", + tgt_size=50, + src_color="blue", + src_size=20, + mask_color="yellow", + probability_cmap="Greens", +): """ Plot all sources of target neuron `tgt_nrn` in a source layer `src_layer`. 
@@ -1376,14 +1388,14 @@ def PlotSources(src_layer, tgt_nrn, syn_type=None, fig=None, tgtpos = GetPosition(tgt_nrn) # get layer extent - ext = src_layer.spatial['extent'] + ext = src_layer.spatial["extent"] if len(ext) == 2: # 2D layer # get layer extent and center, x and y xext, yext = ext - xctr, yctr = src_layer.spatial['center'] + xctr, yctr = src_layer.spatial["center"] if fig is None: fig = plt.figure() @@ -1401,8 +1413,15 @@ def PlotSources(src_layer, tgt_nrn, syn_type=None, fig=None, if mask is not None or probability_parameter is not None: edges = [xctr - xext, xctr + xext, yctr - yext, yctr + yext] - PlotProbabilityParameter(tgt_nrn, probability_parameter, mask=mask, edges=edges, ax=ax, - prob_cmap=probability_cmap, mask_color=mask_color) + PlotProbabilityParameter( + tgt_nrn, + probability_parameter, + mask=mask, + edges=edges, + ax=ax, + prob_cmap=probability_cmap, + mask_color=mask_color, + ) _draw_extent(ax, xctr, yctr, xext, yext) @@ -1412,7 +1431,7 @@ def PlotSources(src_layer, tgt_nrn, syn_type=None, fig=None, if fig is None: fig = plt.figure() - ax = fig.add_subplot(111, projection='3d') + ax = fig.add_subplot(111, projection="3d") else: ax = fig.gca() @@ -1429,37 +1448,35 @@ def PlotSources(src_layer, tgt_nrn, syn_type=None, fig=None, return fig -def _create_mask_patches(mask, periodic, extent, source_pos, face_color='yellow'): +def _create_mask_patches(mask, periodic, extent, source_pos, face_color="yellow"): """Create Matplotlib Patch objects representing the mask""" # import pyplot here and not at toplevel to avoid preventing users # from changing matplotlib backend after importing nest - import matplotlib.pyplot as plt import matplotlib as mtpl + import matplotlib.pyplot as plt - edge_color = 'black' + edge_color = "black" alpha = 0.2 line_width = 2 mask_patches = [] - if 'anchor' in mask: - offs = np.array(mask['anchor']) + if "anchor" in mask: + offs = np.array(mask["anchor"]) else: - offs = np.array([0., 0.]) + offs = np.array([0.0, 0.0]) - if 'circular' in mask: - r = mask['circular']['radius'] + if "circular" in mask: + r = mask["circular"]["radius"] - patch = plt.Circle(source_pos + offs, radius=r, - fc=face_color, ec=edge_color, alpha=alpha, lw=line_width) + patch = plt.Circle(source_pos + offs, radius=r, fc=face_color, ec=edge_color, alpha=alpha, lw=line_width) mask_patches.append(patch) if periodic: for pos in _shifted_positions(source_pos + offs, extent): - patch = plt.Circle(pos, radius=r, - fc=face_color, ec=edge_color, alpha=alpha, lw=line_width) + patch = plt.Circle(pos, radius=r, fc=face_color, ec=edge_color, alpha=alpha, lw=line_width) mask_patches.append(patch) - elif 'doughnut' in mask: + elif "doughnut" in mask: # Mmm... 
doughnut def make_doughnut_patch(pos, r_out, r_in, ec, fc, alpha): def make_circle(r): @@ -1468,6 +1485,7 @@ def make_circle(r): x = r * np.cos(t) y = r * np.sin(t) return np.hstack((x, y)) + outside_verts = make_circle(r_out)[::-1] inside_verts = make_circle(r_in) codes = np.ones(len(inside_verts), dtype=mpath.Path.code_type) * mpath.Path.LINETO @@ -1478,8 +1496,8 @@ def make_circle(r): path = mpath.Path(vertices, all_codes) return mpatches.PathPatch(path, fc=fc, ec=ec, alpha=alpha, lw=line_width) - r_in = mask['doughnut']['inner_radius'] - r_out = mask['doughnut']['outer_radius'] + r_in = mask["doughnut"]["inner_radius"] + r_out = mask["doughnut"]["outer_radius"] pos = source_pos + offs patch = make_doughnut_patch(pos, r_in, r_out, edge_color, face_color, alpha) mask_patches.append(patch) @@ -1487,21 +1505,20 @@ def make_circle(r): for pos in _shifted_positions(source_pos + offs, extent): patch = make_doughnut_patch(pos, r_in, r_out, edge_color, face_color, alpha) mask_patches.append(patch) - elif 'rectangular' in mask: - ll = np.array(mask['rectangular']['lower_left']) - ur = np.array(mask['rectangular']['upper_right']) + elif "rectangular" in mask: + ll = np.array(mask["rectangular"]["lower_left"]) + ur = np.array(mask["rectangular"]["upper_right"]) width = ur[0] - ll[0] height = ur[1] - ll[1] pos = source_pos + ll + offs - cntr = [pos[0] + width/2, pos[1] + height/2] + cntr = [pos[0] + width / 2, pos[1] + height / 2] - if 'azimuth_angle' in mask['rectangular']: - angle = mask['rectangular']['azimuth_angle'] + if "azimuth_angle" in mask["rectangular"]: + angle = mask["rectangular"]["azimuth_angle"] else: angle = 0.0 - patch = plt.Rectangle(pos, width, height, - fc=face_color, ec=edge_color, alpha=alpha, lw=line_width) + patch = plt.Rectangle(pos, width, height, fc=face_color, ec=edge_color, alpha=alpha, lw=line_width) # Need to rotate about center trnsf = mtpl.transforms.Affine2D().rotate_deg_around(cntr[0], cntr[1], angle) + plt.gca().transData patch.set_transform(trnsf) @@ -1509,42 +1526,57 @@ def make_circle(r): if periodic: for pos in _shifted_positions(source_pos + ll + offs, extent): - patch = plt.Rectangle(pos, width, height, - fc=face_color, ec=edge_color, alpha=alpha, lw=line_width) + patch = plt.Rectangle(pos, width, height, fc=face_color, ec=edge_color, alpha=alpha, lw=line_width) - cntr = [pos[0] + width/2, pos[1] + height/2] + cntr = [pos[0] + width / 2, pos[1] + height / 2] # Need to rotate about center trnsf = mtpl.transforms.Affine2D().rotate_deg_around(cntr[0], cntr[1], angle) + plt.gca().transData patch.set_transform(trnsf) mask_patches.append(patch) - elif 'elliptical' in mask: - width = mask['elliptical']['major_axis'] - height = mask['elliptical']['minor_axis'] - if 'azimuth_angle' in mask['elliptical']: - angle = mask['elliptical']['azimuth_angle'] + elif "elliptical" in mask: + width = mask["elliptical"]["major_axis"] + height = mask["elliptical"]["minor_axis"] + if "azimuth_angle" in mask["elliptical"]: + angle = mask["elliptical"]["azimuth_angle"] else: angle = 0.0 - if 'anchor' in mask['elliptical']: - anchor = mask['elliptical']['anchor'] + if "anchor" in mask["elliptical"]: + anchor = mask["elliptical"]["anchor"] else: - anchor = np.array([0., 0.]) - patch = mpl.patches.Ellipse(source_pos + offs + anchor, width, height, - angle=angle, fc=face_color, - ec=edge_color, alpha=alpha, lw=line_width) + anchor = np.array([0.0, 0.0]) + patch = mpl.patches.Ellipse( + source_pos + offs + anchor, + width, + height, + angle=angle, + fc=face_color, + ec=edge_color, + 
alpha=alpha, + lw=line_width, + ) mask_patches.append(patch) if periodic: for pos in _shifted_positions(source_pos + offs + anchor, extent): - patch = mpl.patches.Ellipse(pos, width, height, angle=angle, fc=face_color, - ec=edge_color, alpha=alpha, lw=line_width) + patch = mpl.patches.Ellipse( + pos, width, height, angle=angle, fc=face_color, ec=edge_color, alpha=alpha, lw=line_width + ) mask_patches.append(patch) else: - raise ValueError('Mask type cannot be plotted with this version of PyNEST.') + raise ValueError("Mask type cannot be plotted with this version of PyNEST.") return mask_patches -def PlotProbabilityParameter(source, parameter=None, mask=None, edges=[-0.5, 0.5, -0.5, 0.5], shape=[100, 100], - ax=None, prob_cmap='Greens', mask_color='yellow'): +def PlotProbabilityParameter( + source, + parameter=None, + mask=None, + edges=[-0.5, 0.5, -0.5, 0.5], + shape=[100, 100], + ax=None, + prob_cmap="Greens", + mask_color="yellow", +): """ Create a plot of the connection probability and/or mask. @@ -1578,10 +1610,10 @@ def PlotProbabilityParameter(source, parameter=None, mask=None, edges=[-0.5, 0.5 import matplotlib.pyplot as plt if not HAVE_MPL: - raise ImportError('Matplotlib could not be imported') + raise ImportError("Matplotlib could not be imported") if parameter is None and mask is None: - raise ValueError('At least one of parameter or mask must be specified') + raise ValueError("At least one of parameter or mask must be specified") if ax is None: fig, ax = plt.subplots() ax.set_xlim(*edges[:2]) @@ -1593,13 +1625,14 @@ def PlotProbabilityParameter(source, parameter=None, mask=None, edges=[-0.5, 0.5 positions = [[x, y] for y in np.linspace(edges[2], edges[3], shape[1])] values = parameter.apply(source, positions) z[:, i] = np.array(values) - img = ax.imshow(np.minimum(np.maximum(z, 0.0), 1.0), extent=edges, - origin='lower', cmap=prob_cmap, vmin=0., vmax=1.) + img = ax.imshow( + np.minimum(np.maximum(z, 0.0), 1.0), extent=edges, origin="lower", cmap=prob_cmap, vmin=0.0, vmax=1.0 + ) plt.colorbar(img, ax=ax, fraction=0.046, pad=0.04) if mask is not None: - periodic = source.spatial['edge_wrap'] - extent = source.spatial['extent'] + periodic = source.spatial["edge_wrap"] + extent = source.spatial["extent"] source_pos = GetPosition(source) patches = _create_mask_patches(mask, periodic, extent, source_pos, face_color=mask_color) for patch in patches: diff --git a/pynest/nest/lib/hl_api_types.py b/pynest/nest/lib/hl_api_types.py index c4c2cab999..d3d93438d4 100644 --- a/pynest/nest/lib/hl_api_types.py +++ b/pynest/nest/lib/hl_api_types.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# _hl_api_types.py +# hl_api_types.py # # This file is part of NEST. # @@ -23,42 +23,45 @@ Classes defining the different PyNEST types """ -from .._ll_api import * -from .. import pynestkernel as kernel +import json +import numbers +from math import floor, log + +import numpy + from .. 
import nestkernel_api as nestkernel -from ._hl_api_helper import ( +from ..ll_api import * +from .hl_api_helper import ( get_parameters, get_parameters_hierarchical_addressing, is_iterable, restructure_data, ) -from ._hl_api_simulation import GetKernelStatus +from .hl_api_simulation import GetKernelStatus def sli_func(*args, **kwargs): - raise RuntimeError(f'Called sli_func with\nargs: {args}\nkwargs: {kwargs}') + raise RuntimeError(f"Called sli_func with\nargs: {args}\nkwargs: {kwargs}") -import numpy -import json -from math import floor, log try: import pandas + HAVE_PANDAS = True except ImportError: HAVE_PANDAS = False __all__ = [ - 'CollocatedSynapses', - 'Compartments', - 'CreateParameter', - 'Mask', - 'NodeCollection', - 'Parameter', - 'Receptors', - 'serializable', - 'SynapseCollection', - 'to_json', + "CollocatedSynapses", + "Compartments", + "CreateParameter", + "Mask", + "NodeCollection", + "Parameter", + "Receptors", + "serialize_data", + "SynapseCollection", + "to_json", ] @@ -84,7 +87,8 @@ def CreateParameter(parametertype, specs): Notes ----- - - Instead of using `CreateParameter` you can also use the various parametrizations embedded in NEST. See for + + Instead of using `CreateParameter` you can also use the various parametrizations embedded in NEST. See for instance :py:func:`.uniform`. **Parameter types** @@ -93,11 +97,14 @@ def CreateParameter(parametertype, specs): acceptable keys for their corresponding specification dictionaries: * Constant + :: 'constant' : {'value' : float} # constant value + * Randomization + :: # random parameter with uniform distribution in [min,max) @@ -114,6 +121,7 @@ def CreateParameter(parametertype, specs): 'lognormal' : {'mean' : float, # mean value of logarithm, default: 0.0 'std' : float} # standard deviation of log, default: 1.0 + """ return nestkernel.llapi_create_parameter({parametertype: specs}) @@ -212,10 +220,16 @@ def __iter__(self): def __add__(self, other): if not isinstance(other, NodeCollection): - raise NotImplementedError() + if isinstance(other, numbers.Number) and other == 0: + other = NodeCollection() + else: + raise TypeError(f"Cannot add object of type '{type(other).__name__}' to 'NodeCollection'") return nestkernel.llapi_join_nc(self._datum, other._datum) + def __radd__(self, other): + return self + other + def __getitem__(self, key): if isinstance(key, slice): if key.start is None: @@ -223,60 +237,60 @@ def __getitem__(self, key): else: start = key.start + 1 if key.start >= 0 else key.start if abs(start) > self.__len__(): - raise IndexError('slice start value outside of the NodeCollection') + raise IndexError("slice start value outside of the NodeCollection") if key.stop is None: stop = self.__len__() else: stop = key.stop if key.stop > 0 else key.stop - 1 if abs(stop) > self.__len__(): - raise IndexError('slice stop value outside of the NodeCollection') + raise IndexError("slice stop value outside of the NodeCollection") step = 1 if key.step is None else key.step if step < 1: - raise IndexError('slicing step for NodeCollection must be strictly positive') + raise IndexError("slicing step for NodeCollection must be strictly positive") return nestkernel.llapi_slice(self._datum, start, stop, step) elif isinstance(key, (int, numpy.integer)): if abs(key + (key >= 0)) > self.__len__(): - raise IndexError('index value outside of the NodeCollection') - return self[key:key + 1:1] + raise IndexError("index value outside of the NodeCollection") + return self[key : key + 1 : 1] elif isinstance(key, (list, tuple)): if len(key) == 
0:
                 return NodeCollection([])
             # Must check if elements are bool first, because bool inherits from int
             if all(isinstance(x, bool) for x in key):
                 if len(key) != len(self):
-                    raise IndexError('Bool index array must be the same length as NodeCollection')
+                    raise IndexError("Bool index array must be the same length as NodeCollection")
                 np_key = numpy.array(key, dtype=bool)
             # Checking that elements are not instances of bool too, because bool inherits from int
-            elif all(isinstance(x, int) and not isinstance(x, bool) for x in key):
+            elif all(isinstance(x, (int, numpy.integer)) and not isinstance(x, bool) for x in key):
                 np_key = numpy.array(key, dtype=numpy.uint64)
                 if len(numpy.unique(np_key)) != len(np_key):
-                    raise ValueError('All node IDs in a NodeCollection have to be unique')
+                    raise ValueError("All node IDs in a NodeCollection have to be unique")
             else:
-                raise TypeError('Indices must be integers or bools')
+                raise TypeError("Indices must be integers or bools")
             return nestkernel.llapi_take_array_index(self._datum, np_key)
         elif isinstance(key, numpy.ndarray):
             if len(key) == 0:
                 return NodeCollection([])
             if len(key.shape) != 1:
-                raise TypeError('NumPy indices must one-dimensional')
+                raise TypeError("NumPy indices must be one-dimensional")
             is_booltype = numpy.issubdtype(key.dtype, numpy.dtype(bool).type)
             if not (is_booltype or numpy.issubdtype(key.dtype, numpy.integer)):
-                raise TypeError('NumPy indices must be an array of integers or bools')
+                raise TypeError("NumPy indices must be an array of integers or bools")
             if is_booltype and len(key) != len(self):
-                raise IndexError('Bool index array must be the same length as NodeCollection')
+                raise IndexError("Bool index array must be the same length as NodeCollection")
             if not is_booltype and len(numpy.unique(key)) != len(key):
-                raise ValueError('All node IDs in a NodeCollection have to be unique')
+                raise ValueError("All node IDs in a NodeCollection have to be unique")
             return nestkernel.llapi_take_array_index(self._datum, key)
         else:
-            raise IndexError('only integers, slices, lists, tuples, and numpy arrays are valid indices')
+            raise IndexError("only integers, slices, lists, tuples, and numpy arrays are valid indices")

     def __contains__(self, node_id):
         return nestkernel.llapi_nc_contains(self._datum, node_id)

     def __eq__(self, other):
         if not isinstance(other, NodeCollection):
-            raise NotImplementedError('Cannot compare NodeCollection to {}'.format(type(other).__name__))
+            raise NotImplementedError("Cannot compare NodeCollection to {}".format(type(other).__name__))

         if self.__len__() != other.__len__():
             return False
@@ -313,7 +327,7 @@ def get(self, *params, **kwargs):
             This is for hierarchical addressing.
         output : str, ['pandas','json'], optional
             If the returned data should be in a Pandas DataFrame or in a
-            JSON serializable format.
+            JSON string format.
Returns ------- @@ -366,21 +380,19 @@ def get(self, *params, **kwargs): """ if not self: - raise ValueError('Cannot get parameter of empty NodeCollection') + raise ValueError("Cannot get parameter of empty NodeCollection") # ------------------------- # # Checks of input # # ------------------------- # if not kwargs: - output = '' - elif 'output' in kwargs: - output = kwargs['output'] - if output == 'pandas' and not HAVE_PANDAS: - raise ImportError('Pandas could not be imported') + output = "" + elif "output" in kwargs: + output = kwargs["output"] + if output == "pandas" and not HAVE_PANDAS: + raise ImportError("Pandas could not be imported") else: - raise TypeError('Got unexpected keyword argument') - - pandas_output = output == 'pandas' + raise TypeError("Got unexpected keyword argument") if len(params) == 0: # get() is called without arguments @@ -388,23 +400,25 @@ def get(self, *params, **kwargs): elif len(params) == 1: # params is a tuple with a string or list of strings result = get_parameters(self, params[0]) - if params[0] == 'compartments': + if params[0] == "compartments": result = Compartments(self, result) - elif params[0] == 'receptors': + elif params[0] == "receptors": result = Receptors(self, result) else: # Hierarchical addressing # TODO-PYNEST-NG: Drop this? Not sure anyone ever used it... result = get_parameters_hierarchical_addressing(self, params) + # TODO-PYNEST-NG: Decide if the behavior should be the same + # for single-node node collections or different. if isinstance(result, dict) and len(self) == 1: new_result = {} for k, v in result.items(): - new_result[k] = v[0] if is_iterable(v) and len(v) == 1 else v + new_result[k] = v[0] if is_iterable(v) and len(v) == 1 and type(v) is not dict else v result = new_result - if pandas_output: - index = self.get('global_id') + if output == "pandas": + index = self.get("global_id") if len(params) == 1 and isinstance(params[0], str): # params is a string result = {params[0]: result} @@ -415,7 +429,7 @@ def get(self, *params, **kwargs): index = [index] result = {key: [val] for key, val in result.items()} result = pandas.DataFrame(result, index=index) - elif output == 'json': + elif output == "json": result = to_json(result) return result @@ -456,25 +470,26 @@ def set(self, params=None, **kwargs): local_nodes = [self.local] if len(self) == 1 else self.local - if isinstance(params, dict) and 'compartments' in params: - if isinstance(params['compartments'], Compartments): - params['compartments'] = params['compartments'].get_tuple() - elif params['compartments'] is None: + if isinstance(params, dict) and "compartments" in params: + if isinstance(params["compartments"], Compartments): + params["compartments"] = params["compartments"].get_tuple() + elif params["compartments"] is None: # Adding compartments has been handled by the += operator, so we can remove the entry. - params.pop('compartments') + params.pop("compartments") - if isinstance(params, dict) and 'receptors' in params: - if isinstance(params['receptors'], Receptors): - params['receptors'] = params['receptors'].get_tuple() - elif params['receptors'] is None: + if isinstance(params, dict) and "receptors" in params: + if isinstance(params["receptors"], Receptors): + params["receptors"] = params["receptors"].get_tuple() + elif params["receptors"] is None: # Adding receptors has been handled by the += operator, so we can remove the entry. 
-            params.pop('receptors')
+            params.pop("receptors")

         if isinstance(params, dict) and all(local_nodes):
             node_params = self[0].get()
-            iterable_node_param = lambda key: key in node_params and not is_iterable(node_params[key])
-            contains_list = [is_iterable(vals) and iterable_node_param(key) for key, vals in params.items()]
+            contains_list = [
+                is_iterable(vals) and key in node_params and not is_iterable(node_params[key])
+                for key, vals in params.items()
+            ]

             if any(contains_list):
                 temp_param = [{} for _ in range(self.__len__())]
@@ -500,8 +515,7 @@ def tolist(self):
         if self.__len__() == 0:
             return []

-        return (list(self.get('global_id')) if len(self) > 1
-                else [self.get('global_id')])
+        return list(self.get("global_id")) if len(self) > 1 else [self.get("global_id")]

     def index(self, node_id):
         """
@@ -520,7 +534,7 @@ def index(self, node_id):

         index = nestkernel.llapi_nc_find(self._datum, node_id)
         if index == -1:
-            raise ValueError('{} is not in NodeCollection'.format(node_id))
+            raise ValueError("{} is not in NodeCollection".format(node_id))

         return index

@@ -534,9 +548,9 @@ def __array__(self, dtype=None):

     def __getattr__(self, attr):
         if not self:
-            raise AttributeError('Cannot get attribute of empty NodeCollection')
+            raise AttributeError("Cannot get attribute of empty NodeCollection")

-        if attr == 'spatial':
+        if attr == "spatial":
             metadata = nestkernel.llapi_get_nc_metadata(self._datum)
             val = metadata if metadata else None
             super().__setattr__(attr, val)
@@ -546,7 +560,7 @@ def __getattr__(self, attr):
         # raises AttributeError to tell NumPy that interfaces other than
         # __array__ are not available (otherwise get_parameters would be
         # queried, KeyError would be raised, and all would crash)
-        if attr.startswith('__array_'):
+        if attr.startswith("__array_"):
             raise AttributeError

         return self.get(attr)

@@ -554,7 +568,7 @@ def __setattr__(self, attr, value):
         # `_datum` is the only property of NodeCollection that should not be
         # interpreted as a property of the model
-        if attr == '_datum':
+        if attr == "_datum":
             super().__setattr__(attr, value)
         else:
             self.set({attr: value})
@@ -590,17 +604,16 @@ class SynapseCollection:

     _datum = None

     def __init__(self, data):
-
         if isinstance(data, list):
             for datum in data:
-                if (not isinstance(datum, nestkernel.ConnectionObject)):
+                if not isinstance(datum, nestkernel.ConnectionObject):
                     raise TypeError("Expected ConnectionObject.")
             self._datum = data
         elif data is None:
             # We can have an empty SynapseCollection if there are no connections.
             self._datum = data
         else:
-            if (not isinstance(data, nestkernel.ConnectionObject)):
+            if not isinstance(data, nestkernel.ConnectionObject):
                 raise TypeError("Expected ConnectionObject.")
             # self._datum needs to be a list of ConnectionObjects.
self._datum = [data] @@ -621,10 +634,8 @@ def __eq__(self, other): if self.__len__() != other.__len__(): return False - self_get = self.get(['source', 'target', 'target_thread', - 'synapse_id', 'port']) - other_get = other.get(['source', 'target', 'target_thread', - 'synapse_id', 'port']) + self_get = self.get(["source", "target", "target_thread", "synapse_id", "port"]) + other_get = other.get(["source", "target", "target_thread", "synapse_id", "port"]) if self_get != other_get: return False return True @@ -665,23 +676,23 @@ def __str__(self): def format_row_(s, t, sm, w, dly): try: - return f'{s:>{src_len-1}d} {t:>{trg_len}d} {sm:>{sm_len}s} {w:>#{w_len}.{4}g} {dly:>#{d_len}.{4}g}' + return f"{s:>{src_len-1}d} {t:>{trg_len}d} {sm:>{sm_len}s} {w:>#{w_len}.{4}g} {dly:>#{d_len}.{4}g}" except ValueError: # Used when we have many connections and print_full=False - return f'{s:>{src_len-1}} {t:>{trg_len}} {sm:>{sm_len}} {w:>{w_len}} {dly:>{d_len}}' + return f"{s:>{src_len-1}} {t:>{trg_len}} {sm:>{sm_len}} {w:>{w_len}} {dly:>{d_len}}" MAX_SIZE_FULL_PRINT = 35 # 35 is arbitrarily chosen. params = self.get() if len(params) == 0: - return 'The synapse collection does not contain any connections.' + return "The synapse collection does not contain any connections." - srcs = params['source'] - trgt = params['target'] - wght = params['weight'] - dlay = params['delay'] - s_model = params['synapse_model'] + srcs = params["source"] + trgt = params["target"] + wght = params["weight"] + dlay = params["delay"] + s_model = params["synapse_model"] if isinstance(srcs, int): srcs = [srcs] @@ -690,11 +701,11 @@ def format_row_(s, t, sm, w, dly): dlay = [dlay] s_model = [s_model] - src_h = 'source' - trg_h = 'target' - sm_h = 'synapse model' - w_h = 'weight' - d_h = 'delay' + src_h = "source" + trg_h = "target" + sm_h = "synapse model" + w_h = "weight" + d_h = "delay" # Find maximum number of characters for each column, used to determine width of column src_len = max(len(src_h) + 2, floor(log(max(srcs), 10))) @@ -706,21 +717,23 @@ def format_row_(s, t, sm, w, dly): # 35 is arbitrarily chosen. 
if len(srcs) >= MAX_SIZE_FULL_PRINT and not self.print_full: # u'\u22EE ' is the unicode for vertical ellipsis, used when we have many connections - srcs = srcs[:15] + [u'\u22EE '] + srcs[-15:] - trgt = trgt[:15] + [u'\u22EE '] + trgt[-15:] - wght = wght[:15] + [u'\u22EE '] + wght[-15:] - dlay = dlay[:15] + [u'\u22EE '] + dlay[-15:] - s_model = s_model[:15] + [u'\u22EE '] + s_model[-15:] - - headers = f'{src_h:^{src_len}} {trg_h:^{trg_len}} {sm_h:^{sm_len}} {w_h:^{w_len}} {d_h:^{d_len}}' + '\n' - borders = '-'*src_len + ' ' + '-'*trg_len + ' ' + '-'*sm_len + ' ' + '-'*w_len + ' ' + '-'*d_len + '\n' - output = '\n'.join(format_row_(s, t, sm, w, d) for s, t, sm, w, d in zip(srcs, trgt, s_model, wght, dlay)) + srcs = srcs[:15] + ["\u22EE "] + srcs[-15:] + trgt = trgt[:15] + ["\u22EE "] + trgt[-15:] + wght = wght[:15] + ["\u22EE "] + wght[-15:] + dlay = dlay[:15] + ["\u22EE "] + dlay[-15:] + s_model = s_model[:15] + ["\u22EE "] + s_model[-15:] + + headers = f"{src_h:^{src_len}} {trg_h:^{trg_len}} {sm_h:^{sm_len}} {w_h:^{w_len}} {d_h:^{d_len}}" + "\n" + borders = ( + "-" * src_len + " " + "-" * trg_len + " " + "-" * sm_len + " " + "-" * w_len + " " + "-" * d_len + "\n" + ) + output = "\n".join(format_row_(s, t, sm, w, d) for s, t, sm, w, d in zip(srcs, trgt, s_model, wght, dlay)) result = headers + borders + output return result def __getattr__(self, attr): - if attr == 'distance': + if attr == "distance": dist = nestkernel.llapi_distance(self._datum) super().__setattr__(attr, dist) return self.distance @@ -730,26 +743,26 @@ def __getattr__(self, attr): def __setattr__(self, attr, value): # `_datum` is the only property of SynapseCollection that should not be # interpreted as a property of the model - if attr == '_datum' or attr == 'print_full': + if attr == "_datum" or attr == "print_full": super().__setattr__(attr, value) else: self.set({attr: value}) def sources(self): """Returns iterator containing the source node IDs of the `SynapseCollection`.""" - sources = self.get('source') + sources = self.get("source") if not isinstance(sources, (list, tuple)): sources = (sources,) return iter(sources) def targets(self): """Returns iterator containing the target node IDs of the `SynapseCollection`.""" - targets = self.get('target') + targets = self.get("target") if not isinstance(targets, (list, tuple)): targets = (targets,) return iter(targets) - def get(self, keys=None, output=''): + def get(self, keys=None, output=""): """ Return a parameter dictionary of the connections. @@ -766,7 +779,7 @@ def get(self, keys=None, output=''): belonging to the given `keys`. output : str, ['pandas','json'], optional If the returned data should be in a Pandas DataFrame or in a - JSON serializable format. + JSON string format. Returns ------- @@ -806,12 +819,12 @@ def get(self, keys=None, output=''): {'source': [1, 1, 1, 2, 2, 2, 3, 3, 3], 'weight': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]} """ - pandas_output = output == 'pandas' + pandas_output = output == "pandas" if pandas_output and not HAVE_PANDAS: - raise ImportError('Pandas could not be imported') + raise ImportError("Pandas could not be imported") # Return empty dictionary if we have no connections or if we have done a nest.ResetKernel() - num_conns = GetKernelStatus('num_connections') # Has to be called first because it involves MPI communication. + num_conns = GetKernelStatus("num_connections") # Has to be called first because it involves MPI communication. 
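+        # NOTE: since the call above involves MPI communication, every rank
+        # must reach it; it must not be moved behind the early return below.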
if self.__len__() == 0 or num_conns == 0: # Return empty tuple if get is called with an argument return {} if keys is None else () @@ -830,12 +843,11 @@ def get(self, keys=None, output=''): final_result = restructure_data(result, keys) if pandas_output: - index = (self.get('source') if self.__len__() > 1 else - (self.get('source'),)) + index = self.get("source") if self.__len__() > 1 else (self.get("source"),) if isinstance(keys, str): final_result = {keys: final_result} final_result = pandas.DataFrame(final_result, index=index) - elif output == 'json': + elif output == "json": final_result = to_json(final_result) return final_result @@ -870,12 +882,11 @@ def set(self, params=None, **kwargs): # This was added to ensure that the function is a nop (instead of, # for instance, raising an exception) when applied to an empty # SynapseCollection, or after having done a nest.ResetKernel(). - if self.__len__() == 0 or GetKernelStatus('network_size') == 0: + if self.__len__() == 0 or GetKernelStatus("network_size") == 0: return - if (isinstance(params, (list, tuple)) and - self.__len__() != len(params)): - raise TypeError("status dict must be a dict, or a list of dicts of length {}".format(self.__len__())) + if isinstance(params, (list, tuple)) and self.__len__() != len(params): + raise TypeError(f"status dict must be a dict, or a list of dicts of length {self.__len__()}") if kwargs and params is None: params = kwargs @@ -884,8 +895,10 @@ def set(self, params=None, **kwargs): if isinstance(params, dict): node_params = self[0].get() - contains_list = [is_iterable(vals) and key in node_params and not is_iterable(node_params[key]) for - key, vals in params.items()] + contains_list = [ + is_iterable(vals) and key in node_params and not is_iterable(node_params[key]) + for key, vals in params.items() + ] if any(contains_list): temp_param = [{} for _ in range(self.__len__())] @@ -905,8 +918,7 @@ def disconnect(self): """ Disconnect the connections in the `SynapseCollection`. 
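+
+        A usage sketch (the neuron model is illustrative)::
+
+            nodes = nest.Create("iaf_psc_alpha", 2)
+            nest.Connect(nodes, nodes)
+            conns = nest.GetConnections()
+            conns.disconnect()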
""" - sps(self._datum) - sr('Disconnect_a') + nestkernel.llapi_disconnect_syncoll(self._datum) class CollocatedSynapses: @@ -918,18 +930,19 @@ class CollocatedSynapses: Example ------- - :: + :: - nodes = nest.Create('iaf_psc_alpha', 3) - syn_spec = nest.CollocatedSynapses({'weight': 4., 'delay': 1.5}, - {'synapse_model': 'stdp_synapse'}, - {'synapse_model': 'stdp_synapse', 'alpha': 3.}) - nest.Connect(nodes, nodes, conn_spec='one_to_one', syn_spec=syn_spec) + nodes = nest.Create('iaf_psc_alpha', 3) + syn_spec = nest.CollocatedSynapses({'weight': 4., 'delay': 1.5}, + {'synapse_model': 'stdp_synapse'}, + {'synapse_model': 'stdp_synapse', 'alpha': 3.}) + nest.Connect(nodes, nodes, conn_spec='one_to_one', syn_spec=syn_spec) - conns = nest.GetConnections() + conns = nest.GetConnections() + + print(conns.alpha) + print(len(syn_spec)) - print(conns.alpha) - print(len(syn_spec)) """ def __init__(self, *args): @@ -952,26 +965,27 @@ class Mask: _datum = None # The constructor should not be called by the user - def __init__(self, datum): + def __init__(self, data): """Masks must be created using the CreateMask command.""" - if not isinstance(datum, kernel.SLIDatum) or datum.dtype != "masktype": - raise TypeError("expected mask Datum") - self._datum = datum + if not isinstance(data, nestkernel.MaskObject): + raise TypeError("Expected MaskObject.") + self._datum = data + # TODO-PYNEST-NG: Convert operators # Generic binary operation - def _binop(self, op, other): - if not isinstance(other, Mask): + def _binop(self, op, rhs): + if not isinstance(rhs, Mask): raise NotImplementedError() - return sli_func(op, self._datum, other._datum) + return sli_func(op, self._datum, rhs._datum) - def __or__(self, other): - return self._binop("or", other) + def __or__(self, rhs): + return self._binop("or", rhs) - def __and__(self, other): - return self._binop("and", other) + def __and__(self, rhs): + return self._binop("and", rhs) - def __sub__(self, other): - return self._binop("sub", other) + def __sub__(self, rhs): + return self._binop("sub", rhs) def Inside(self, point): """ @@ -987,7 +1001,7 @@ def Inside(self, point): out : bool True if the point is inside the mask, False otherwise """ - return sli_func("Inside", point, self._datum) + return nestkernel.llapi_inside_mask(point, self._datum) # TODO-PYNEST-NG: We may consider moving the entire (or most of) Parameter class to the cython level. @@ -1006,8 +1020,9 @@ class Parameter: def __init__(self, datum): """Parameters must be created using the CreateParameter command.""" if not isinstance(datum, nestkernel.ParameterObject): - raise TypeError("expected low-level parameter object;" - " use the CreateParameter() function to create a Parameter") + raise TypeError( + "expected low-level parameter object;" " use the CreateParameter() function to create a Parameter" + ) self._datum = datum def _arg_as_parameter(self, arg): @@ -1015,20 +1030,23 @@ def _arg_as_parameter(self, arg): return arg if isinstance(arg, (int, float)): # Value for the constant parameter must be float. 
- return CreateParameter('constant', {'value': float(arg)}) + return CreateParameter("constant", {"value": float(arg)}) raise NotImplementedError() def __add__(self, other): return nestkernel.llapi_add_parameter(self._datum, self._arg_as_parameter(other)._datum) - def __radd__(self, other): - return self + other + def __radd__(self, lhs): + return self + lhs def __sub__(self, other): return nestkernel.llapi_subtract_parameter(self._datum, self._arg_as_parameter(other)._datum) - def __rsub__(self, other): - return self * (-1) + other + def __rsub__(self, lhs): + return self * (-1) + lhs + + def __pos__(self): + return self def __neg__(self): return self * (-1) @@ -1036,8 +1054,8 @@ def __neg__(self): def __mul__(self, other): return nestkernel.llapi_multiply_parameter(self._datum, self._arg_as_parameter(other)._datum) - def __rmul__(self, other): - return self * other + def __rmul__(self, lhs): + return self * lhs def __div__(self, other): return nestkernel.llapi_divide_parameter(self._datum, self._arg_as_parameter(other)._datum) @@ -1046,25 +1064,25 @@ def __truediv__(self, other): return nestkernel.llapi_divide_parameter(self._datum, self._arg_as_parameter(other)._datum) def __pow__(self, exponent): - return nestkernel.llapi_pow_parameter(self._datum, self._arg_as_parameter(float(exponent))._datum) + return nestkernel.llapi_pow_parameter(self._datum, float(exponent)) def __lt__(self, other): - return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {'comparator': 0}) + return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {"comparator": 0}) def __le__(self, other): - return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {'comparator': 1}) + return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {"comparator": 1}) def __eq__(self, other): - return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {'comparator': 2}) + return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {"comparator": 2}) def __ne__(self, other): - return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {'comparator': 3}) + return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {"comparator": 3}) def __ge__(self, other): - return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {'comparator': 4}) + return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {"comparator": 4}) def __gt__(self, other): - return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {'comparator': 5}) + return nestkernel.llapi_compare_parameter(self._datum, self._arg_as_parameter(other)._datum, {"comparator": 5}) def GetValue(self): """ @@ -1101,24 +1119,25 @@ def apply(self, spatial_nc, positions=None): return nestkernel.llapi_apply_parameter(self._datum, spatial_nc) else: if len(spatial_nc) != 1: - raise ValueError('The NodeCollection must contain a single node ID only') + raise ValueError("The NodeCollection must contain a single node ID only") if not isinstance(positions, (list, tuple)): - raise TypeError('Positions must be a list or tuple of positions') + raise TypeError("Positions must be a list or tuple of positions") for pos in positions: if not isinstance(pos, (list, tuple, numpy.ndarray)): - raise TypeError('Each position must be a list 
or tuple') + raise TypeError("Each position must be a list or tuple") if len(pos) != len(positions[0]): - raise ValueError('All positions must have the same number of dimensions') - return nestkernel.llapi_apply_parameter(self._datum, {'source': spatial_nc, 'targets': positions}) + raise ValueError("All positions must have the same number of dimensions") + return nestkernel.llapi_apply_parameter(self._datum, {"source": spatial_nc, "targets": positions}) class CmBase: - def __init__(self, node_collection, elements): if not isinstance(node_collection, NodeCollection): - raise TypeError(f'node_collection must be a NodeCollection, got {type(node_collection)}') + raise TypeError(f"node_collection must be a NodeCollection, got {type(node_collection)}") + if isinstance(elements, list): + elements = tuple(elements) if not isinstance(elements, tuple): - raise TypeError(f'elements must be a tuple of dicts, got {type(elements)}') + raise TypeError(f"elements must be a tuple of dicts, got {type(elements)}") self._elements = elements self._node_collection = node_collection @@ -1129,14 +1148,17 @@ def __add__(self, other): elif isinstance(other, (tuple, list)): if not all(isinstance(d, dict) for d in other): raise TypeError( - f'{self.__class__.__name__} can only be added with dicts, lists of dicts, ' - f'or other {self.__class__.__name__}') + f"{self.__class__.__name__} can only be added with dicts, lists of dicts, " + f"or other {self.__class__.__name__}" + ) new_elements += list(other) elif isinstance(other, self.__class__): new_elements += list(other._elements) else: - raise NotImplementedError(f'{self.__class__.__name__} can only be added with dicts, lists of dicts,' - f' or other {self.__class__.__name__}, got {type(other)}') + raise NotImplementedError( + f"{self.__class__.__name__} can only be added with dicts, lists of dicts," + f" or other {self.__class__.__name__}, got {type(other)}" + ) return self.__class__(self._node_collection, tuple(new_elements)) @@ -1145,15 +1167,19 @@ def __iadd__(self, other): new_elements = [other] elif isinstance(other, (tuple, list)): if not all(isinstance(d, dict) for d in other): - raise TypeError(f'{self.__class__.__name__} can only be added with dicts, lists of dicts, ' - f'or other {self.__class__.__name__}') + raise TypeError( + f"{self.__class__.__name__} can only be added with dicts, lists of dicts, " + f"or other {self.__class__.__name__}" + ) new_elements = list(other) elif isinstance(other, self.__class__): new_elements = list(other._elements) else: - raise NotImplementedError(f'{self.__class__.__name__} can only be added with dicts, lists of dicts,' - f' or other {self.__class__.__name__}, got {type(other)}') - self._node_collection.set({f'add_{self.__class__.__name__.lower()}': new_elements}) + raise NotImplementedError( + f"{self.__class__.__name__} can only be added with dicts, lists of dicts," + f" or other {self.__class__.__name__}, got {type(other)}" + ) + self._node_collection.set({f"add_{self.__class__.__name__.lower()}": new_elements}) return None # Flagging elements as added by returning None def __getitem__(self, key): @@ -1176,8 +1202,8 @@ class Receptors(CmBase): pass -def serializable(data): - """Make data serializable for JSON. +def serialize_data(data): + """Serialize data for JSON. 
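+
+    A usage sketch (the returned node IDs assume a fresh kernel)::
+
+        nodes = nest.Create("iaf_psc_alpha", 3)
+        serialize_data(nodes)            # -> [1, 2, 3]
+        serialize_data(numpy.arange(2))  # -> [0, 1]
+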
 Parameters
 ----------
@@ -1191,7 +1217,7 @@

     if isinstance(data, (numpy.ndarray, NodeCollection)):
         return data.tolist()
-    if isinstance(data, SynapseCollection):
+    elif isinstance(data, SynapseCollection):
         # Get full information from SynapseCollection
-        return serializable(data.get())
+        return serialize_data(data.get())
     if isinstance(data, (list, tuple)):
@@ -1202,7 +1228,7 @@


 def to_json(data, **kwargs):
-    """Serialize data to JSON.
+    """Convert the object to a JSON string.

     Parameters
     ----------
@@ -1213,9 +1239,9 @@
     Returns
     -------
     data_json : str
-        JSON format of the data
+        JSON string format of the data
     """
-    data_serialized = serializable(data)
+    data_serialized = serialize_data(data)
     data_json = json.dumps(data_serialized, **kwargs)

     return data_json
diff --git a/pynest/nest/logic/hl_api_logic.py b/pynest/nest/logic/hl_api_logic.py
index eafa67642c..210816b0cc 100644
--- a/pynest/nest/logic/hl_api_logic.py
+++ b/pynest/nest/logic/hl_api_logic.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 #
-# _hl_api_logic.py
+# hl_api_logic.py
 #
 # This file is part of NEST.
 #
@@ -19,11 +19,11 @@
 # You should have received a copy of the GNU General Public License
 # along with NEST. If not, see <http://www.gnu.org/licenses/>.

-from ..lib.hl_api_types import CreateParameter
 from .. import nestkernel_api as nestkernel
+from ..lib.hl_api_types import CreateParameter

 __all__ = [
-    'conditional',
+    "conditional",
 ]

@@ -46,9 +46,7 @@ def conditional(condition, param_if_true, param_if_false):
         Object representing the conditional.
     """
     if isinstance(param_if_true, (int, float)):
-        param_if_true = CreateParameter(
-            'constant', {'value': float(param_if_true)})
+        param_if_true = CreateParameter("constant", {"value": float(param_if_true)})
     if isinstance(param_if_false, (int, float)):
-        param_if_false = CreateParameter(
-            'constant', {'value': float(param_if_false)})
+        param_if_false = CreateParameter("constant", {"value": float(param_if_false)})
     return nestkernel.llapi_conditional_parameter(condition._datum, param_if_true._datum, param_if_false._datum)
diff --git a/pynest/nest/random/__init__.py b/pynest/nest/random/__init__.py
index 1eebdde30c..03f911cd18 100644
--- a/pynest/nest/random/__init__.py
+++ b/pynest/nest/random/__init__.py
@@ -19,4 +19,4 @@
 # You should have received a copy of the GNU General Public License
 # along with NEST. If not, see <http://www.gnu.org/licenses/>.

-from ._hl_api_random import *  # noqa: F401,F403
+from .hl_api_random import *  # noqa: F401,F403
diff --git a/pynest/nest/raster_plot.py b/pynest/nest/raster_plot.py
new file mode 100644
index 0000000000..adf64a2ce0
--- /dev/null
+++ b/pynest/nest/raster_plot.py
@@ -0,0 +1,338 @@
+# -*- coding: utf-8 -*-
+#
+# raster_plot.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see <http://www.gnu.org/licenses/>.
diff --git a/pynest/nest/raster_plot.py b/pynest/nest/raster_plot.py
new file mode 100644
index 0000000000..adf64a2ce0
--- /dev/null
+++ b/pynest/nest/raster_plot.py
@@ -0,0 +1,340 @@
+# -*- coding: utf-8 -*-
+#
+# raster_plot.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST.  If not, see <http://www.gnu.org/licenses/>.
+
+"""Functions for raster plotting."""
+
+import nest
+import numpy
+
+__all__ = ["extract_events", "from_data", "from_device", "from_file", "from_file_numpy", "from_file_pandas"]
+
+
+def extract_events(data, time=None, sel=None):
+    """Extract all events within a given time interval.
+
+    Both time and sel may be used at the same time such that all
+    events are extracted for which both conditions are true.
+
+    Parameters
+    ----------
+    data : list
+        Matrix such that
+        data[:,0] is a vector of all node_ids and
+        data[:,1] a vector with the corresponding time stamps.
+    time : list, optional
+        List with at most two entries such that
+        time=[t_max] extracts all events with t < t_max
+        time=[t_min, t_max] extracts all events with t_min <= t < t_max
+    sel : list, optional
+        List of node_ids such that
+        sel=[node_id1, ..., node_idn] extracts all events from these node_ids.
+        All others are discarded.
+
+    Returns
+    -------
+    numpy.array
+        List of events as (node_id, t) tuples
+    """
+    val = []
+
+    if time:
+        t_max = time[-1]
+        if len(time) > 1:
+            t_min = time[0]
+        else:
+            t_min = 0
+
+    for v in data:
+        t = v[1]
+        node_id = v[0]
+        if time and (t < t_min or t >= t_max):
+            continue
+        if not sel or node_id in sel:
+            val.append(v)
+
+    return numpy.array(val)
+
+
+def from_data(data, sel=None, **kwargs):
+    """Plot raster plot from data array.
+
+    Parameters
+    ----------
+    data : list
+        Matrix such that
+        data[:,0] is a vector of all node_ids and
+        data[:,1] a vector with the corresponding time stamps.
+    sel : list, optional
+        List of node_ids such that
+        sel=[node_id1, ..., node_idn] extracts all events from these node_ids.
+        All others are discarded.
+    kwargs:
+        Parameters passed to _make_plot
+    """
+    if len(data) == 0:
+        raise nest.NESTError("No data to plot.")
+    ts = data[:, 1]
+    d = extract_events(data, sel=sel)
+    ts1 = d[:, 1]
+    node_ids = d[:, 0]
+
+    return _make_plot(ts, ts1, node_ids, data[:, 0], **kwargs)
+
+
+def from_file(fname, **kwargs):
+    """Plot raster from file.
+
+    Parameters
+    ----------
+    fname : str or tuple(str) or list(str)
+        File name or list of file names
+
+        If a list of files is given, the data from them is concatenated as if
+        it had been stored in a single file - useful when MPI is enabled and
+        data is logged separately for each MPI rank, for example.
+    kwargs:
+        Parameters passed to _make_plot
+    """
+    if isinstance(fname, str):
+        fname = [fname]
+
+    if isinstance(fname, (list, tuple)):
+        try:
+            global pandas
+            pandas = __import__("pandas")
+            return from_file_pandas(fname, **kwargs)
+        except ImportError:
+            return from_file_numpy(fname, **kwargs)
+    else:
+        raise TypeError("fname should be one of str/list(str)/tuple(str).")
+
+
+def from_file_pandas(fname, **kwargs):
+    """Use pandas."""
+    data = None
+    for f in fname:
+        data_frame = pandas.read_table(f, header=2, skipinitialspace=True)
+        newdata = data_frame.values
+
+        if data is None:
+            data = newdata
+        else:
+            data = numpy.concatenate((data, newdata))
+
+    return from_data(data, **kwargs)
+
+
+def from_file_numpy(fname, **kwargs):
+    """Use numpy."""
+    data = None
+    for f in fname:
+        newdata = numpy.loadtxt(f, skiprows=3)
+
+        if data is None:
+            data = newdata
+        else:
+            data = numpy.concatenate((data, newdata))
+
+    return from_data(data, **kwargs)
+
+
+def from_device(detec, **kwargs):
+    """
+    Plot raster from a spike recorder.
+
+    Parameters
+    ----------
+    detec : nest.NodeCollection
+        NodeCollection of the spike recorder to plot from
+    kwargs:
+        Parameters passed to _make_plot
+
+    Raises
+    ------
+    nest.NESTError
+        If detec is not a spike recorder or no events were recorded.
+    """
+
+    type_id = nest.GetDefaults(detec.get("model"), "type_id")
+    if type_id != "spike_recorder":
+        raise nest.NESTError("Please provide a spike_recorder.")
+
+    if detec.get("record_to") == "memory":
+        ts, node_ids = _from_memory(detec)
+
+        if not len(ts):
+            raise nest.NESTError("No events recorded!")
+
+        if "title" not in kwargs:
+            kwargs["title"] = "Raster plot from device '%i'" % detec.get("global_id")
+
+        if detec.get("time_in_steps"):
+            xlabel = "Steps"
+        else:
+            xlabel = "Time (ms)"
+
+        return _make_plot(ts, ts, node_ids, node_ids, xlabel=xlabel, **kwargs)
+
+    elif detec.get("record_to") == "ascii":
+        fname = detec.get("filenames")
+        return from_file(fname, **kwargs)
+
+    else:
+        raise nest.NESTError("No data to plot. Make sure that record_to is set to either 'ascii' or 'memory'.")
+
+
+def _from_memory(detec):
+    ev = detec.get("events")
+    return ev["times"], ev["senders"]
+
+
+def _make_plot(ts, ts1, node_ids, neurons, hist=True, hist_binwidth=5.0, grayscale=False, title=None, xlabel=None):
+    """Generic plotting routine.
+
+    Constructs a raster plot along with an optional histogram (common part in
+    all routines above).
+
+    Parameters
+    ----------
+    ts : list
+        All timestamps
+    ts1 : list
+        Timestamps corresponding to node_ids
+    node_ids : list
+        Global ids corresponding to ts1
+    neurons : list
+        Node IDs of neurons to plot
+    hist : bool, optional
+        Display histogram
+    hist_binwidth : float, optional
+        Width of histogram bins
+    grayscale : bool, optional
+        Plot in grayscale
+    title : str, optional
+        Plot title
+    xlabel : str, optional
+        Label for x-axis
+    """
+    import matplotlib.pyplot as plt
+
+    plt.figure()
+
+    if grayscale:
+        color_marker = ".k"
+        color_bar = "gray"
+    else:
+        color_marker = "."
+        color_bar = "blue"
+
+    color_edge = "black"
+
+    if xlabel is None:
+        xlabel = "Time (ms)"
+
+    ylabel = "Neuron ID"
+
+    if hist:
+        ax1 = plt.axes([0.1, 0.3, 0.85, 0.6])
+        plotid = plt.plot(ts1, node_ids, color_marker)
+        plt.ylabel(ylabel)
+        plt.xticks([])
+        xlim = plt.xlim()
+
+        plt.axes([0.1, 0.1, 0.85, 0.17])
+        t_bins = numpy.arange(numpy.amin(ts), numpy.amax(ts), float(hist_binwidth))
+        n, _ = _histogram(ts, bins=t_bins)
+        num_neurons = len(numpy.unique(neurons))
+        heights = 1000 * n / (hist_binwidth * num_neurons)
+
+        plt.bar(t_bins, heights, width=hist_binwidth, color=color_bar, edgecolor=color_edge)
+        plt.yticks([int(x) for x in numpy.linspace(0.0, int(max(heights) * 1.1) + 5, 4)])
+        plt.ylabel("Rate (Hz)")
+        plt.xlabel(xlabel)
+        plt.xlim(xlim)
+        plt.axes(ax1)
+    else:
+        plotid = plt.plot(ts1, node_ids, color_marker)
+        plt.xlabel(xlabel)
+        plt.ylabel(ylabel)
+
+    if title is None:
+        plt.title("Raster plot")
+    else:
+        plt.title(title)
+
+    plt.draw()
+
+    return plotid
+
+
+def _histogram(a, bins=10, bin_range=None, normed=False):
+    """Calculate histogram for data.
+
+    Parameters
+    ----------
+    a : list
+        Data to calculate histogram for
+    bins : int, optional
+        Number of bins
+    bin_range : tuple, optional
+        Range of bins as (min, max)
+    normed : bool, optional
+        Whether distribution should be normalized
+
+    Raises
+    ------
+    ValueError
+        If bin_range is inverted or bins do not increase monotonically.
+    """
+    from numpy import asarray, concatenate, iterable, linspace, sort
+
+    a = asarray(a).ravel()
+
+    if bin_range is not None:
+        mn, mx = bin_range
+        if mn > mx:
+            raise ValueError("max must be larger than min in range parameter")
+
+    if not iterable(bins):
+        if bin_range is None:
+            bin_range = (a.min(), a.max())
+        mn, mx = [mi + 0.0 for mi in bin_range]
+        if mn == mx:
+            mn -= 0.5
+            mx += 0.5
+        bins = linspace(mn, mx, bins, endpoint=False)
+    else:
+        if (bins[1:] - bins[:-1] < 0).any():
+            raise ValueError("bins must increase monotonically")
+
+    # best block size probably depends on processor cache size
+    block = 65536
+    n = sort(a[:block]).searchsorted(bins)
+    for i in range(block, a.size, block):
+        n += sort(a[i : i + block]).searchsorted(bins)
+    n = concatenate([n, [len(a)]])
+    n = n[1:] - n[:-1]
+
+    if normed:
+        db = bins[1] - bins[0]
+        return 1.0 / (a.size * db) * n, bins
+    else:
+        return n, bins
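Note for reviewers: a self-contained sketch of the typical raster_plot workflow, assuming the module is importable as nest.raster_plot once this patch is applied (model and parameter values are illustrative):

    import matplotlib.pyplot as plt
    import nest
    import nest.raster_plot

    nest.ResetKernel()
    noise = nest.Create("poisson_generator", params={"rate": 10000.0})
    neurons = nest.Create("iaf_psc_alpha", 10)
    recorder = nest.Create("spike_recorder")  # record_to defaults to "memory"
    nest.Connect(noise, neurons, syn_spec={"weight": 15.0})
    nest.Connect(neurons, recorder)
    nest.Simulate(500.0)

    # from_device dispatches on record_to: "memory" plots directly,
    # "ascii" goes through from_file on the recorded filenames.
    nest.raster_plot.from_device(recorder, hist=True, hist_binwidth=20.0)
    plt.show()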
diff --git a/pynest/nest/versionchecker.py.in b/pynest/nest/versionchecker.py.in
new file mode 100644
index 0000000000..0d71dde2aa
--- /dev/null
+++ b/pynest/nest/versionchecker.py.in
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+#
+# versionchecker.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST.  If not, see <http://www.gnu.org/licenses/>.
+
+
+"""
+Check that the Python compiletime and runtime versions match.
+
+"""
+
+import sys
+
+v_major_mismatch = sys.version_info.major != @Python_VERSION_MAJOR@
+v_minor_mismatch = sys.version_info.minor != @Python_VERSION_MINOR@
+if v_major_mismatch or v_minor_mismatch:
+    msg = ("Python runtime version does not match 'nest' compiletime version. "
+           + "Please use Python @Python_VERSION_MAJOR@.@Python_VERSION_MINOR@.")
+    raise Exception(msg)
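Note for reviewers: versionchecker.py.in is a CMake template, so the @...@ tokens are substituted at configure time. For a Python 3.9 build the generated versionchecker.py would read roughly as follows (substituted values illustrative):

    import sys

    v_major_mismatch = sys.version_info.major != 3
    v_minor_mismatch = sys.version_info.minor != 9
    if v_major_mismatch or v_minor_mismatch:
        msg = (
            "Python runtime version does not match 'nest' compiletime version. "
            "Please use Python 3.9."
        )
        raise Exception(msg)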
diff --git a/pynest/nest/visualization.py b/pynest/nest/visualization.py
new file mode 100644
index 0000000000..58cb6a442f
--- /dev/null
+++ b/pynest/nest/visualization.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+#
+# visualization.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Functions to visualize a network built in NEST.
+"""
+
+import nest
+import pydot
+
+__all__ = [
+    "plot_network",
+]
+
+
+def plot_network(nodes, filename, ext_conns=False, plot_modelnames=False):
+    """Plot the given nodes and the connections that originate from
+    them.
+
+    This function depends on the availability of the pydot module.
+
+    Simplified version for NEST 3.
+
+    Parameters
+    ----------
+    nodes : NodeCollection
+        NodeCollection containing node IDs of nodes to plot
+    filename : str
+        Filename to save the plot to. Can end either in .pdf or .png to
+        determine the type of the output.
+    ext_conns : bool, optional
+        Draw connections to targets that are not in nodes. If it is True,
+        these are drawn to a node named 'ext'.
+    plot_modelnames : bool, optional
+        Annotate each node with its model name (not implemented yet).
+
+    Raises
+    ------
+    nest.NESTError
+        If nodes is empty, not a NodeCollection, or filename is unsupported.
+    """
+
+    if not isinstance(nodes, nest.NodeCollection):
+        raise nest.NESTError("nodes must be a NodeCollection")
+
+    if len(nodes) == 0:
+        raise nest.NESTError("nodes must at least contain one node")
+
+    if ext_conns:
+        raise NotImplementedError("ext_conns")
+    if plot_modelnames:
+        raise NotImplementedError("plot_modelnames")
+
+    conns = nest.GetConnections(nodes)
+
+    graph = pydot.Dot(rankdir="LR", ranksep="5")
+    for source, target in zip(conns.sources(), conns.targets()):
+        graph.add_edge(pydot.Edge(str(source), str(target)))
+
+    filetype = filename.rsplit(".", 1)[-1]
+    if filetype == "pdf":
+        graph.write_pdf(filename)
+    elif filetype == "png":
+        graph.write_png(filename)
+    else:
+        raise nest.NESTError("Filename must end in '.png' or '.pdf'.")
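Note for reviewers: a short sketch of plot_network in use, assuming pydot and Graphviz are available and a built nest is on the path:

    import nest
    import nest.visualization

    nest.ResetKernel()
    neurons = nest.Create("iaf_psc_alpha", 5)
    nest.Connect(neurons, neurons, conn_spec={"rule": "fixed_indegree", "indegree": 2})

    # Renders a left-to-right pydot graph of the sources/targets returned
    # by nest.GetConnections and writes it based on the file extension.
    nest.visualization.plot_network(neurons, "network.pdf")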
diff --git a/pynest/nest/voltage_trace.py b/pynest/nest/voltage_trace.py
new file mode 100644
index 0000000000..dd6d93bc30
--- /dev/null
+++ b/pynest/nest/voltage_trace.py
@@ -0,0 +1,254 @@
+# -*- coding: utf-8 -*-
+#
+# voltage_trace.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Functions to plot voltage traces.
+"""
+
+import nest
+import numpy
+
+__all__ = [
+    "from_device",
+    "from_file",
+]
+
+
+def from_file(fname, title=None, grayscale=False):
+    """Plot voltage trace from file.
+
+    Parameters
+    ----------
+    fname : str or list
+        Filename or list of filenames to load from
+    title : str, optional
+        Plot title
+    grayscale : bool, optional
+        Plot in grayscale
+
+    Raises
+    ------
+    ValueError
+        If the data has an unexpected number of columns.
+    """
+    import matplotlib.pyplot as plt
+
+    if isinstance(fname, (list, tuple)):
+        data = None
+        for file in fname:
+            if data is None:
+                data = numpy.loadtxt(file)
+            else:
+                data = numpy.concatenate((data, numpy.loadtxt(file)))
+    else:
+        data = numpy.loadtxt(fname)
+
+    if grayscale:
+        line_style = "k"
+    else:
+        line_style = ""
+
+    if len(data.shape) == 1:
+        print("INFO: only found 1 column in the file. Assuming that only one neuron was recorded.")
+        plotid = plt.plot(data, line_style)
+        plt.xlabel("Time (steps of length interval)")
+
+    elif data.shape[1] == 2:
+        print("INFO: found 2 columns in the file. Assuming them to be node ID, pot.")
+
+        plotid = []
+        data_dict = {}
+        for dat in data:
+            if dat[0] not in data_dict:
+                data_dict[dat[0]] = [dat[1]]
+            else:
+                data_dict[dat[0]].append(dat[1])
+
+        for dat in data_dict:
+            plotid.append(plt.plot(data_dict[dat], line_style, label="Neuron %i" % dat))
+
+        plt.xlabel("Time (steps of length interval)")
+        plt.legend()
+
+    elif data.shape[1] == 3:
+        plotid = []
+        data_dict = {}
+        g = data[0][0]
+        t = []
+        for d in data:
+            if d[0] not in data_dict:
+                data_dict[d[0]] = [d[2]]
+            else:
+                data_dict[d[0]].append(d[2])
+            if d[0] == g:
+                t.append(d[1])
+
+        for d in data_dict:
+            plotid.append(plt.plot(t, data_dict[d], line_style, label="Neuron %i" % d))
+
+        plt.xlabel("Time (ms)")
+        plt.legend()
+
+    else:
+        raise ValueError("Inappropriate data shape %s!" % str(data.shape))
+
+    if not title:
+        title = "Membrane potential from file '%s'" % fname
+
+    plt.title(title)
+    plt.ylabel("Membrane potential (mV)")
+    plt.draw()
+
+    return plotid
+
+
+def from_device(detec, neurons=None, title=None, grayscale=False, timeunit="ms"):
+    """Plot the membrane potential of a set of neurons recorded by
+    the given voltmeter or multimeter.
+
+    Parameters
+    ----------
+    detec : nest.NodeCollection
+        NodeCollection with exactly one voltmeter or multimeter
+    neurons : list, optional
+        Indices of neurons to plot
+    title : str, optional
+        Plot title
+    grayscale : bool, optional
+        Plot in grayscale
+    timeunit : str, optional
+        Unit of time
+
+    Raises
+    ------
+    nest.NESTError
+        If detec is not a single voltmeter or multimeter measuring V_m,
+        or if no events were recorded.
+    """
+    import matplotlib.pyplot as plt
+
+    if len(detec) > 1:
+        raise nest.NESTError("Please provide a single voltmeter.")
+
+    type_id = nest.GetDefaults(detec.get("model"), "type_id")
+    if type_id not in ("voltmeter", "multimeter"):
+        raise nest.NESTError("Please provide a voltmeter or a multimeter measuring V_m.")
+    elif type_id == "multimeter":
+        if "V_m" not in detec.get("record_from"):
+            raise nest.NESTError("Please provide a multimeter measuring V_m.")
+        elif detec.get("record_to") != "memory" and len(detec.get("record_from")) > 1:
+            raise nest.NESTError("Please provide a multimeter measuring only V_m or record to memory!")
+
+    if detec.get("record_to") == "memory":
+        timefactor = 1.0
+        if not detec.get("time_in_steps"):
+            if timeunit == "s":
+                timefactor = 1000.0
+            else:
+                timeunit = "ms"
+
+        times, voltages = _from_memory(detec)
+
+        if not len(times):
+            raise nest.NESTError("No events recorded!")
+
+        if neurons is None:
+            neurons = voltages.keys()
+
+        plotids = []
+        for neuron in neurons:
+            time_values = numpy.array(times[neuron]) / timefactor
+
+            if grayscale:
+                line_style = "k"
+            else:
+                line_style = ""
+
+            try:
+                plotids.append(plt.plot(time_values, voltages[neuron], line_style, label="Neuron %i" % neuron))
+            except KeyError:
+                print("INFO: Wrong ID: {0}".format(neuron))
+
+        if not title:
+            title = "Membrane potential"
+        plt.title(title)
+
+        plt.ylabel("Membrane potential (mV)")
+
+        if detec.time_in_steps:
+            plt.xlabel("Steps")
+        else:
+            plt.xlabel("Time (%s)" % timeunit)
+
+        plt.legend(loc="best")
+        plt.draw()
+
+        return plotids
+
+    elif detec.get("record_to") == "ascii":
+        fname = detec.get("filenames")
+        return from_file(fname, title, grayscale)
+    else:
+        raise nest.NESTError("Provided devices neither record to ascii file, nor to memory.")
+
+
+def _from_memory(detec):
+    """Get voltage traces from memory.
+
+    Parameters
+    ----------
+    detec : nest.NodeCollection
+        NodeCollection of the voltmeter or multimeter
+    """
+    import array
+
+    ev = detec.get("events")
+    potentials = ev["V_m"]
+    senders = ev["senders"]
+
+    v = {}
+    t = {}
+
+    if "times" in ev:
+        times = ev["times"]
+        for s, currentsender in enumerate(senders):
+            if currentsender not in v:
+                v[currentsender] = array.array("f")
+                t[currentsender] = array.array("f")
+
+            v[currentsender].append(float(potentials[s]))
+            t[currentsender].append(float(times[s]))
+    else:
+        # reconstruct the time vector, if not stored explicitly
+        origin = detec.get("origin")
+        start = detec.get("start")
+        interval = detec.get("interval")
+        senders_uniq = numpy.unique(senders)
+        num_intvls = len(senders) // len(senders_uniq)
+        times_s = origin + start + interval + interval * numpy.array(range(num_intvls))
+
+        for s, currentsender in enumerate(senders):
+            if currentsender not in v:
+                v[currentsender] = array.array("f")
+                t[currentsender] = times_s
+            v[currentsender].append(float(potentials[s]))
+
+    return t, v
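Note for reviewers: the corresponding minimal usage for voltage_trace, again with illustrative model parameters:

    import matplotlib.pyplot as plt
    import nest
    import nest.voltage_trace

    nest.ResetKernel()
    neuron = nest.Create("iaf_psc_alpha")
    noise = nest.Create("poisson_generator", params={"rate": 8000.0})
    voltmeter = nest.Create("voltmeter")
    nest.Connect(noise, neuron, syn_spec={"weight": 15.0})
    nest.Connect(voltmeter, neuron)
    nest.Simulate(300.0)

    # With record_to == "memory" this reads events via _from_memory;
    # with "ascii" it falls back to from_file on the recorded filenames.
    nest.voltage_trace.from_device(voltmeter)
    plt.show()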
From 214ba6b566152b4dc163a11ab5d9232f2be81ec3 Mon Sep 17 00:00:00 2001
From: Robin De Schepper
Date: Tue, 19 Sep 2023 11:18:12 +0200
Subject: [PATCH 14/17] added prep script prints

---
 build_support/prepare_wheel_container.py | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/build_support/prepare_wheel_container.py b/build_support/prepare_wheel_container.py
index ab8dba4048..7cebfff75e 100644
--- a/build_support/prepare_wheel_container.py
+++ b/build_support/prepare_wheel_container.py
@@ -11,13 +11,21 @@
 
 
 def main():
+    print("Installing libgomp...")
     install_omp()
+    print("Done.")
     # Containers run multiple builds, so check if a previous build has installed the
     # dependency already
     if not os.path.exists("/boost"):
+        print(f"Installing Boost {version(BOOST_VERSION)}...")
         install_boost()
+        print("Done.")
     if not os.path.exists("/gsl"):
+        print(f"Installing GSL {version(GSL_VERSION)}...")
         install_gsl()
+        print("Done.")
+    print("Container preparation complete.")
+    print()
 
 
 def run_sequence(seq):
@@ -61,7 +69,7 @@ def install_gsl():
 
 def install_omp():
     """Use the yum package manager of CentOS to install OpenMP libraries"""
     install_seq = (
-        "yum install libomp-dev",
+        "yum install -y libgomp",
     )
     run_sequence(install_seq)
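Note for reviewers: the new progress messages rely on a version() helper and on BOOST_VERSION/GSL_VERSION constants that these hunks do not show. The sketch below is an assumption about their shape, not code from the patch:

    BOOST_VERSION = (1, 78, 0)  # illustrative values, not from this patch
    GSL_VERSION = (2, 7)


    def version(version_tuple):
        # Render a tuple like (1, 78, 0) as "1.78.0" for the progress messages.
        return ".".join(str(part) for part in version_tuple)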
From 3aa80e082f34f9fc787baf646302fb792e4005ca Mon Sep 17 00:00:00 2001
From: Robin De Schepper
Date: Tue, 19 Sep 2023 15:23:03 +0200
Subject: [PATCH 15/17] add REQUIRED to OpenMP find_package.

Solves "OpenMP not found" in manylinux builds.
---
 cmake/ProcessOptions.cmake | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmake/ProcessOptions.cmake b/cmake/ProcessOptions.cmake
index b8954b2303..6c02f8505c 100644
--- a/cmake/ProcessOptions.cmake
+++ b/cmake/ProcessOptions.cmake
@@ -437,7 +437,7 @@ function( NEST_PROCESS_WITH_OPENMP )
       set( OpenMP_C_FLAGS "${with-openmp}" )
       set( OpenMP_CXX_FLAGS "${with-openmp}" )
     else ()
-      find_package( OpenMP )
+      find_package( OpenMP REQUIRED )
     endif ()
     if ( OPENMP_FOUND )
       # export found variables to parent scope

From 1e1ad3bce2fe61479ed46bce0007686b9e88b99d Mon Sep 17 00:00:00 2001
From: Robin De Schepper
Date: Tue, 19 Sep 2023 17:00:58 +0200
Subject: [PATCH 16/17] reintroduced -DCMAKE_MODULE_PATH with note

---
 pyproject.toml | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 0a785912a6..2340c6e819 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -15,8 +15,11 @@ build-verbosity = 3
 
 [tool.cibuildwheel.environment]
 NEST_CMAKE_BUILDWHEEL="ON"
-NEST_INSTALL_NODOC=true
-CMAKE_ARGS="-DCMAKE_MODULE_PATH=/project/cmake"
+# We have to set CMAKE_MODULE_PATH so that our own FindCython is loaded before skbuild's.
+# I was unable to also append the matching skbuild modules path, as it varies per build.
+# The flag is therefore passed twice (by us and by skbuild); let's hope they stack up.
+# Example: -DCMAKE_MODULE_PATH:PATH=/tmp/pip-build-env-po15qu7j/overlay/lib/python3.9/site-packages/skbuild/resources/cmake
+CMAKE_ARGS="-DCMAKE_MODULE_PATH=/project/cmake -Dwith-userdoc=OFF"
 BOOST_ROOT="/boost"
 GSL_ROOT_DIR="/gsl"
From e7112c8706ec27c4159645a0b7a06199a58b2705 Mon Sep 17 00:00:00 2001
From: Robin De Schepper
Date: Mon, 23 Oct 2023 11:45:15 +0200
Subject: [PATCH 17/17] trim down wheel build

---
 CMakeLists.txt                           | 28 ++++++-----
 build_support/prepare_wheel_container.py | 21 ++++++--
 cmake/FindCython.cmake                   |  7 +--
 cmake/ProcessOptions.cmake               | 14 +++---
 doc/CMakeLists.txt                       | 63 ------------------------
 examples/CMakeLists.txt                  | 22 ---------
 pynest/CMakeLists.txt                    |  7 +--
 testsuite/CMakeLists.txt                 | 41 ---------------
 8 files changed, 47 insertions(+), 156 deletions(-)
 delete mode 100644 doc/CMakeLists.txt
 delete mode 100644 examples/CMakeLists.txt
 delete mode 100644 testsuite/CMakeLists.txt

diff --git a/CMakeLists.txt b/CMakeLists.txt
index b7786333d3..283ceeec79 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -26,7 +26,24 @@ project( nest CXX C )
 set( CMAKE_CXX_STANDARD 20 )
 set( CMAKE_CXX_STANDARD_REQUIRED True )
 
-find_package(OpenMP REQUIRED)
+if ( DEFINED ENV{NEST_CMAKE_BUILDWHEEL} )
+  set( NEST_WHEELBUILD ON )
+else ()
+  set( NEST_WHEELBUILD OFF )
+endif ()
+if ( NEST_WHEELBUILD )
+  # Big warning that the wheel build reserves the right to destroy everything, as it
+  # should only run on throwaway containers.
+  message(WARNING "RUNNING WHEEL BUILD, DO NOT RUN ON LOCAL MACHINE, MAY CAUSE IRREVERSIBLE CHANGES!")
+endif ()
+if ( with-openmp )
+  if ( "${with-openmp}" STREQUAL "ON" )
+    # This should be called from cmake/ProcessOptions.cmake:NEST_PROCESS_WITH_OPENMP
+    # but alas, on some unknown combination of newer versions not all compiler flags are
+    # found there, yet magically, are correctly found when we make the call here.
+    find_package( OpenMP REQUIRED )
+  endif ()
+endif ()
 
 set( NEST_USER_EMAIL "users@nest-simulator.org" )
 
@@ -134,6 +151,7 @@ get_target_triple( NEST_TARGET_TRIPLE NEST_TARGET_ARCH NEST_TARGET_VENDOR NEST_T
 nest_process_with_python()
 include( GNUInstallDirs )
 nest_post_process_with_python()
+message( STATUS "Python executable for build utilities (UTILITY_PYTHON): ${UTILITY_PYTHON}" )
 nest_process_with_intel_compiler_flags()
 nest_process_with_warning()
 nest_process_with_libraries()
@@ -225,15 +243,12 @@ add_custom_target( installcheck
 ################################################################################
 ################## Define Subdirectories here ##################
 ################################################################################
 
-add_subdirectory( doc )
 add_subdirectory( bin )
-add_subdirectory( examples )
 add_subdirectory( build_support )
 add_subdirectory( libnestutil )
 add_subdirectory( models )
 add_subdirectory( nestkernel )
 add_subdirectory( thirdparty )
-add_subdirectory( testsuite )
 if ( HAVE_PYTHON )
   add_subdirectory( pynest )
 endif ()
@@ -335,11 +350,6 @@ configure_file(
     "${PROJECT_BINARY_DIR}/bin/nest_vars.sh" @ONLY
 )
 
-configure_file(
-    "${PROJECT_SOURCE_DIR}/doc/fulldoc.conf.in"
-    "${PROJECT_BINARY_DIR}/doc/fulldoc.conf" @ONLY
-)
-
 ################################################################################
 ################## Install Extra Files ##################
 ################################################################################
@@ -348,8 +358,4 @@ install( FILES LICENSE README.md
     DESTINATION ${CMAKE_INSTALL_DOCDIR}
 )
 
-install( DIRECTORY examples/
-    DESTINATION ${CMAKE_INSTALL_DOCDIR}/examples
-)
-
 nest_print_config_summary()
diff --git a/build_support/prepare_wheel_container.py b/build_support/prepare_wheel_container.py
index 7cebfff75e..7e4feb34ec 100644
--- a/build_support/prepare_wheel_container.py
+++ b/build_support/prepare_wheel_container.py
@@ -24,6 +24,7 @@ def main():
         print(f"Installing GSL {version(GSL_VERSION)}...")
         install_gsl()
         print("Done.")
+    strip_wheel_bulk()
     print("Container preparation complete.")
     print()
 
@@ -66,12 +67,27 @@ def install_gsl():
     )
     run_sequence(install_seq)
 
+
 def install_omp():
     """Use the yum package manager of CentOS to install OpenMP libraries"""
-    install_seq = (
-        "yum install -y libgomp",
-    )
+    install_seq = ("yum install -y libgomp",)
     run_sequence(install_seq)
 
+
+def strip_wheel_bulk():
+    """Remove docs and examples from the source tree to slim down the wheel."""
+    from shutil import rmtree, move
+    from os import makedirs
+
+    move("/project/doc/copyright_header.cpp", "/project")
+    move("/project/doc/copyright_header.py", "/project")
+
+    rmtree("/project/doc")
+    rmtree("/project/examples")
+    makedirs("/project/doc")
+
+    move("/project/copyright_header.cpp", "/project/doc")
+    move("/project/copyright_header.py", "/project/doc")
+
+
 main()
diff --git a/cmake/FindCython.cmake b/cmake/FindCython.cmake
index c8de131125..ed48f567af 100644
--- a/cmake/FindCython.cmake
+++ b/cmake/FindCython.cmake
@@ -56,9 +56,9 @@ else()
 endif()
 
 if(CYTHON_EXECUTABLE)
-    set(CYTHON_version_command ${CYTHON_EXECUTABLE} --version)
+    set(CYTHON_version_command "${CYTHON_EXECUTABLE} --version")
 
-    execute_process(COMMAND ${CYTHON_version_command}
+    execute_process(COMMAND ${CYTHON_EXECUTABLE} --version
       OUTPUT_VARIABLE CYTHON_version_output
       ERROR_VARIABLE CYTHON_version_error
       RESULT_VARIABLE CYTHON_version_result
@@ -66,8 +66,8 @@ if(CYTHON_EXECUTABLE)
       OUTPUT_STRIP_TRAILING_WHITESPACE
      ERROR_STRIP_TRAILING_WHITESPACE)
 
    if(NOT ${CYTHON_version_result} EQUAL 0)
      set(_error_msg "Command \"${CYTHON_version_command}\" failed with")
-      set(_error_msg "${_error_msg} output:\n${CYTHON_version_error}")
+      set(_error_msg "${_error_msg} output:\n${CYTHON_version_error}\nstatus: ${CYTHON_version_result}\nstdout: ${CYTHON_version_output}")
      message(SEND_ERROR "${_error_msg}")
    else()
      if("${CYTHON_version_output}" MATCHES "^[Cc]ython version ([^,]+)")
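Note for reviewers: strip_wheel_bulk() irreversibly deletes doc/ and examples/ from the source tree, which is why the CMakeLists warning above exists. A hypothetical guard (not part of this patch) could reuse the NEST_CMAKE_BUILDWHEEL marker that pyproject.toml sets for the container:

    import os
    import sys


    def assert_wheel_container():
        # Hypothetical safety check: refuse to mutate the source tree unless
        # the cibuildwheel environment marker from pyproject.toml is present.
        if os.environ.get("NEST_CMAKE_BUILDWHEEL") != "ON":
            sys.exit("Refusing to strip the source tree outside the wheel container.")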
diff --git a/cmake/ProcessOptions.cmake b/cmake/ProcessOptions.cmake
index 653615896c..fcf56daaf0 100644
--- a/cmake/ProcessOptions.cmake
+++ b/cmake/ProcessOptions.cmake
@@ -420,6 +420,10 @@ endfunction()
 function( NEST_POST_PROCESS_WITH_PYTHON )
   if ( Python_FOUND )
     set( PYEXECDIR "${CMAKE_INSTALL_LIBDIR}/python${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}/site-packages" PARENT_SCOPE )
+    set( UTILITY_PYTHON "${Python_EXECUTABLE}" PARENT_SCOPE )
+  else ()
+    find_package( Python 3 REQUIRED Interpreter )
+    set( UTILITY_PYTHON "${Python_EXECUTABLE}" PARENT_SCOPE )
   endif()
 endfunction()
 
@@ -443,7 +447,7 @@ function( NEST_PROCESS_WITH_OPENMP )
       # set flags
       set( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}" PARENT_SCOPE )
       set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}" PARENT_SCOPE )
-    else()
+    else ()
       printError( "CMake can not find OpenMP." )
     endif ()
   endif ()
@@ -654,16 +658,14 @@ function( NEST_PROCESS_MODELS )
     endif ()
     file(STRINGS "${PROJECT_SOURCE_DIR}/modelsets/${with-modelset}" BUILTIN_MODELS)
   endif()
-
-  # We use python3 here directly, as some of the CI jobs don't seem to have PYTHON
-  # or Python_EXECUTABLE set properly.
+  message( STATUS "Generating model sources with ${UTILITY_PYTHON}" )
   execute_process(
-    COMMAND "python3" "${PROJECT_SOURCE_DIR}/build_support/generate_modelsmodule.py"
+    COMMAND "${UTILITY_PYTHON}" "${PROJECT_SOURCE_DIR}/build_support/generate_modelsmodule.py"
       "${PROJECT_SOURCE_DIR}" "${PROJECT_BINARY_DIR}" "${BUILTIN_MODELS}"
     WORKING_DIRECTORY "${PROJECT_SOURCE_DIR}"
     OUTPUT_VARIABLE MODELS_SOURCES
     ERROR_VARIABLE MODELS_SOURCES_ERROR
-    # Uncomment for debugging: ECHO_OUTPUT_VARIABLE ECHO_ERROR_VARIABLE COMMAND_ECHO STDOUT
+    ECHO_OUTPUT_VARIABLE ECHO_ERROR_VARIABLE COMMAND_ECHO STDOUT
     COMMAND_ERROR_IS_FATAL ANY
   )
diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt
deleted file mode 100644
index 3346b6c456..0000000000
--- a/doc/CMakeLists.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-# doc/CMakeLists.txt
-#
-# This file is part of NEST.
-#
-# Copyright (C) 2004 The NEST Initiative
-#
-# NEST is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or
-# (at your option) any later version.
-#
-# NEST is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with NEST.  If not, see <http://www.gnu.org/licenses/>.
-
-if( BUILD_DOCS )
-  # If we hit this, any part of the documentation was configured to be built.
-  # The top-level 'docs' target will contain all sub-documentations such as `sphinxdocs`
-  # and `doxygendocs`. Using `ALL` we make it run on `make install` as well.
-  add_custom_target( docs ALL )
-endif()
-
-# Determine in or out of tree building
-if ( "${PROJECT_SOURCE_DIR}" STREQUAL "${PROJECT_BINARY_DIR}" )
-  set( OUT_OF_TREE_BUILD "False" )
-else ()
-  set( OUT_OF_TREE_BUILD "True" )
-endif ()
-
-
-if ( BUILD_SPHINX_DOCS )
-  message( STATUS "Configuring Sphinx documentation" )
-  set( _SPHINX_SOURCE_DIR "${PROJECT_SOURCE_DIR}/doc/htmldoc" )
-  set( _SPHINX_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/_build/html" )
-  add_custom_target( sphinxdocs
-    WORKING_DIRECTORY ${_SPHINX_SOURCE_DIR}
-    COMMAND ${Python_EXECUTABLE} clean_source_dirs.py
-    COMMAND ${SPHINX_EXECUTABLE} -b html . 
${_SPHINX_BUILD_DIR} - COMMAND ${Python_EXECUTABLE} resolve_includes.py ${SPHINX_BUILD_DIR}/models - ) - - add_dependencies( docs sphinxdocs ) - - install( DIRECTORY ${_SPHINX_BUILD_DIR} - DESTINATION ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_DOCDIR} - OPTIONAL - ) - install( DIRECTORY logos - DESTINATION ${CMAKE_INSTALL_DOCDIR} - ) -endif () - -if ( BUILD_DOXYGEN_DOCS ) - add_custom_target( doxygendocs - COMMAND ${DOXYGEN_EXECUTABLE} "${CMAKE_CURRENT_BINARY_DIR}/fulldoc.conf" - WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}" - ) - add_dependencies( docs doxygendocs ) -endif () diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt deleted file mode 100644 index b1c834a050..0000000000 --- a/examples/CMakeLists.txt +++ /dev/null @@ -1,22 +0,0 @@ -# CMakeLists.txt -# -# This file is part of NEST. -# -# Copyright (C) 2004 The NEST Initiative -# -# NEST is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 2 of the License, or -# (at your option) any later version. -# -# NEST is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with NEST. If not, see . - -install( FILES run_examples.sh - DESTINATION ${CMAKE_INSTALL_DOCDIR} - ) diff --git a/pynest/CMakeLists.txt b/pynest/CMakeLists.txt index b8844ed3ea..ef2c450385 100644 --- a/pynest/CMakeLists.txt +++ b/pynest/CMakeLists.txt @@ -20,14 +20,11 @@ find_package(Cython) if ( HAVE_PYTHON ) - - # We use python3 here directly, as some of the CI jobs don't seem to have PYTHON - # or Python_EXECUTABLE set properly. execute_process( - COMMAND "python3" "${PROJECT_SOURCE_DIR}/pynest/generate_exception_header.py" + COMMAND "${UTILITY_PYTHON}" "${PROJECT_SOURCE_DIR}/pynest/generate_exception_header.py" "${PROJECT_SOURCE_DIR}" "${PROJECT_BINARY_DIR}" WORKING_DIRECTORY "${PROJECT_SOURCE_DIR}" - # Uncomment for debugging: ECHO_OUTPUT_VARIABLE ECHO_ERROR_VARIABLE COMMAND_ECHO STDOUT + ECHO_OUTPUT_VARIABLE ECHO_ERROR_VARIABLE COMMAND_ECHO STDOUT COMMAND_ERROR_IS_FATAL ANY ) diff --git a/testsuite/CMakeLists.txt b/testsuite/CMakeLists.txt deleted file mode 100644 index d9c445a873..0000000000 --- a/testsuite/CMakeLists.txt +++ /dev/null @@ -1,41 +0,0 @@ -# testsuite/CMakeLists.txt -# -# This file is part of NEST. -# -# Copyright (C) 2004 The NEST Initiative -# -# NEST is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 2 of the License, or -# (at your option) any later version. -# -# NEST is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with NEST. If not, see . 
- -set( TESTSUBDIRS - regressiontests - cpptests - pytests -) - -add_subdirectory( regressiontests ) -add_subdirectory( cpptests ) - -install( DIRECTORY ${TESTSUBDIRS} - DESTINATION ${CMAKE_INSTALL_DATADIR}/testsuite -) - -install( PROGRAMS - do_tests.sh - DESTINATION ${CMAKE_INSTALL_DATADIR}/testsuite -) - -install( FILES - junit_xml.sh run_test.sh summarize_tests.py - DESTINATION ${CMAKE_INSTALL_DATADIR}/testsuite -)