diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index c07c86dfb..809779425 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -16,7 +16,7 @@ jobs: uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} diff --git a/CHANGES.md b/CHANGES.md index 1122ddf2d..005f0c6c3 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,43 @@ +# Version 1.0.7 + +**New features** + +- Introducing `batchtools` subpackage for parameters exploration and optimization + +- Added progress-bar indicating network creation progress. Toggle the progress bar with cfg.progressBar + +- cfg.connRandomSecFromList and cfg.distributeSynsUniformly can now be overriden in individual conn rule + +- Added ability to use `sec`, `loc`, `preSec` and `preLoc` from list in connList-type connParams + +- Updated tests.examples.utils to allow for dynamic pathing + +- Dropped python2 support + +**Bug fixes** + +- Better handling of exceptions in `importCellParams()` (incl. `h.load_file()` - issue 782) + +- Pandas deprecated parameter fix + +- Fixed pointer id overflow on MPI (e.g. 
for gap junctions) + +- preSec and preLoc are no longer lost for inverse pointer connection + +- Fixed crash due to use of matplotlib.TextArea deprecated param (credit: Christian O'Reilly) + +- syncLines in rasterPlot restored + +- Fixed a bug in `gatherDataFromFiles()` where cellGids for node 0 were lost + +- Fixed generating rhythmic spiking pattern with 'uniform' option + +- Fixed misleading console output when cfg.recordStims is On + +- The colors in CSD plots are now properly aligned vertically with the CSD time-series overlays (credit: Sam Neymotin) + +- Update mkdir to makedirs (credit: Jacob Sprouse) + # Version 1.0.6 **New features** diff --git a/doc/build.py b/doc/build.py index 3a3e6b5ff..75f572aff 100644 --- a/doc/build.py +++ b/doc/build.py @@ -51,7 +51,7 @@ 13) Announce the new release 13a) New release announcement text: NetPyNE v#.#.# is now available. For a complete list of changes and bug fixes see: https://github.com/suny-downstate-medical-center/netpyne/releases/tag/v#.#.# - See here for instructions to install or update to the latest version: http://www.netpyne.org/install.html + See here for instructions to install or update to the latest version: https://www.netpyne.org/documentation/installation 13b) Announce on NEURON forum: https://www.neuron.yale.edu/phpBB/viewtopic.php?f=45&t=3685&sid=9c380fe3a835babd47148c81ae71343e 13c) Announce to Google group: diff --git a/doc/source/conf.py b/doc/source/conf.py index 52deba232..4979653f7 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -67,9 +67,9 @@ # built documents. # # The short X.Y version. -version = '1.0.6' +version = '1.0.7' # The full version, including alpha/beta/rc tags. -release = '1.0.6' +release = '1.0.7' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/doc/source/user_documentation.rst b/doc/source/user_documentation.rst index 8843f4c64..1fcd64eed 100644 --- a/doc/source/user_documentation.rst +++ b/doc/source/user_documentation.rst @@ -1183,6 +1183,65 @@ From a specific section and location, you can record section variables such as v # is equivalent to recording soma.myPP.V in NEURON. simConfig.recordTraces['VmyPP'] = {'sec': 'soma', 'pointp': 'myPP', 'var': 'V'} + ## Recording from Synaptic Currents Mechanisms + + # Example of recording from an excitatory synaptic mechanism + # record the 'i' variable (current) from an excitatory synaptic mechanism located + # in the middle of the 'dend' section. This is equivalent to recording + # dend(0.5).exc._ref_i in NEURON. + simConfig.recordTraces['iExcSyn'] = {'sec': 'dend', 'loc': 0.5, 'synMech': 'exc', 'var': 'i'} + + # Example of recording from an inhibitory synaptic mechanism + # record the 'i' variable (current) from an inhibitory synaptic mechanism located + # at 0.3 of the 'soma' section. This is equivalent to recording + # soma(0.3).inh._ref_i in NEURON. + simConfig.recordTraces['iInhSyn'] = {'sec': 'soma', 'loc': 0.3, 'synMech': 'inh', 'var': 'i'} + + # Example of recording multiple synaptic currents + # Recording synaptic currents + simConfig.recordSynapticCurrents = True + synaptic_curr = [ + ('AMPA', 'i'), # Excitatory synaptic current + ('NMDA', 'i'), # Excitatory synaptic current + ('GABA_A', 'i') # Inhibitory synaptic current + ] + if simConfig.recordSynapticCurrents: + for syn_curr in synaptic_curr: + trace_label = f'i__soma_0__{syn_curr[0]}__{syn_curr[1]}' + simConfig.recordTraces.update({trace_label: {'sec': 'soma_0', 'loc': 0.5, 'mech': syn_curr[0], 'var': syn_curr[1]}}) + +The names ``'iExcSyn'`` , ``'iInhSyn'`` , ``'AMPA'`` , ``'NMDA'`` , ``'GABA_A'`` are those defined by the user in ``netParams.synMechParams``, and can be found using ``netParams.synMechParams.keys()``. 
The variables that can be paired with the synaptic mechanism in ``synaptic_curr`` tuples can be found by inspecting the MOD file that is used to define that synaptic mechanism, and the ones that can be recorded are defined as ``RANGE`` type in the MOD file. + +Example: + +* A synaptic mechanism defined in ``netParams.synMechParams`` as ``'AMPA'`` that uses the ``MyExp2SynBB.mod`` template. + + * The user can go to the source ``/mod`` folder, and open the ``MyExp2SynBB.mod`` file, to inspect which variables are defined as ``RANGE``, and those can be recorded. + * The user can also modify the variable type, and define as ``RANGE``, and then recompile the mechanism to make it recordable in netpyne. + +* Example of variables that can be recorded for the given file + + * The user can record ``'tau1'``, ``'tau2'``, ``'e'``, ``'i'``, ``'g'``, ``'Vwt'``, ``'gmax'``. + + +.. code-block:: python + + : $Id: MyExp2SynBB.mod,v 1.4 2010/12/13 21:27:51 samn Exp $ + NEURON { + : THREADSAFE + POINT_PROCESS MyExp2SynBB + RANGE tau1, tau2, e, i, g, Vwt, gmax + NONSPECIFIC_CURRENT i + } + +Those should be specified in ``synaptic_curr`` as: + +.. code-block:: python + + synaptic_curr = [ + ('AMPA', 'i'), # Excitatory synaptic current + ('AMPA', 'g'), # Channel conductance + ]
+ +While objects and interfaces can be handled directly, batchtools offers simple wrapper commands applicable to most use-cases, where +automatic parameter searches can be done by specifying a search space and algorithm through `netpyne.batchtools.search`, and +parameter to model translation and result communication is handled through `netpyne.batchtools.specs` and `netpyne.batchtools.comm` respectively. + +A diagram of the wrapper interactions... + +:: + + netpyne.batchtools.search.search( ) ----------------------------\ host + | | + | search( ) | + ============================================================================================== + | comm.initialize( ) + | comm.send( ) + | cfg = netpyne.batchtools.specs.SimConfig( ) comm.close( ) + | | ^ ^ + v v | | + cfg.update_cfg() ----------------------------------------/ | + | + send( ) netpyne.batchtools.comm( ) + simulation + +1. Setting up batchtools +------------------------ +Beyond the necessary dependency installations for NetPyNE and NEURON, the following `pip` installations are preferred. + +The NetPyNE installation should be handled as a development installation to allow for up to date fixes:: + + git clone https://github.com/Neurosim-lab/netpyne.git + cd netpyne + pip install -e . + +A development install of the batchtools subpackage:: + + git clone https://github.com/jchen6727/batchtk.git + cd batchtk + pip install -e . + +Ray is a dependency for batchtools, and should be installed with the following command:: + + pip install -U ray[default] + +2. Examples +----------- +Examples of NetPyNE batchtools usage can be found in the ``examples`` directory `on the NetPyNE github `_. + +Examples of the underlying batchtk package can be in the ``examples`` directory `on the batchtk github `_. + +3. 
Retrieving batch configuration values through the ``specs`` object +--------------------------------------------------------------------- +Each simulation is able to retrieve relevant configurations through the ``specs`` object, and communicate with +the dispatcher through the ``comm`` object. + +importing the relevant objects + +.. code-block:: python + + from netpyne.batchtools import specs, comm + cfg = specs.SimConfig() # create a SimConfig object, can be provided with a dictionary on initial call to set initial values + netParams = specs.NetParams() # create a netParams object + +``netpyne.batchtools.specs`` behaves similarly to ``netpyne.sim.specs`` except in the following cases: + +* ``netpyne.batchtools.specs`` automatically captures relevant configuration mappings created by the ``dispatcher`` upon initialization + + * these mappings can be retrieved via ``specs.get_mappings()`` + +* the SimConfig object created by ``netpyne.batch.specs.SimConfig()`` will update itself with relevant configuration mappings through the ``update()`` method + +.. code-block:: python + + from netpyne.batchtools import specs # import the custom batch specs + cfg = specs.SimConfig() # create a SimConfig object + cfg.update() # update the cfg object with any relevant mappings for this particular batch job + +The ``update`` method will update the ``SimConfig`` object ``first`` with values supplied in the argument call, and ``then`` with the configuration mappings captured in ``specs`` (see: ``specs.get_mappings()``) + +This replaces the previous idiom for updating the SimConfig object with mappings from the batched job submission + +.. code-block:: python + + try: + from __main__ import cfg # import SimConfig object with params from parent module + except: + from cfg import cfg # if no simConfig in parent module, import directly from tut8_cfg module + +4. 
Additional functionality within the simConfig object +------------------------------------------------------- + +Rather than handling custom ``SimConfig`` object attribute declaration through the ``batch`` ``initCfg`` argument, the new NetPyNE batchtools subpackage moves the custom declaration of ``SimConfig`` attributes to the actual ``SimConfig`` object, allowing them both during instantiation of the object as well as when calling ``cfg.update()`` + +.. code-block:: python + + from netpyne.batchtools import specs # import the custom batch specs + cfg = specs.SimConfig({'foo': 0, 'bar': 1, 'baz': 2}) # create a SimConfig object, initializes it with a dictionary {'foo': 0, 'bar': 1, 'baz': 2} such that + assert cfg.foo == 0 # cfg.foo == 0 + assert cfg.bar == 1 # cfg.bar == 1 + assert cfg.baz == 2 # cfg.baz == 2 + cfg.update({'foo': 3}) # update the cfg object with any relevant mappings for this particular batch job + assert cfg.foo == 3 # cfg.foo == 3 + assert cfg.bar == 1 # cfg.bar remains unchanged + assert cfg.baz == 2 # cfg.baz remains unchanged + +``cfg.update()`` also supports the optional argument ``force_match``, which forces values in the update dictionary to match existing attributes within the ``SimConfig`` object. This setting is recommended to be set to ``True`` in order to prevent the unanticipated creation of new attributes within the ``SimConfig`` object at runtime ... + +..
code-block:: python + + from netpyne.batchtools import specs # import the custom batch specs + cfg = specs.SimConfig({'type': 0}) # create a SimConfig object, initializes it with a dictionary {'type': 0} such that + assert cfg.type == 0 # cfg.type == 0 + try: + cfg.update({'typo': 1}, force_match=True) # cfg.typo is not defined, so this line will raise an AttributeError + except Exception as e: + print(e) + cfg.update({'typo': 1}) # without force_match, the typo attribute cfg.typo is created and set to 1 + assert cfg.type == 0 # cfg.type remains unchanged due to a typo in the attribute name 'type' -> 'typo' + assert cfg.typo == 1 # instead, cfg.typo is created and set to the value 1 + +Both the initialization of the ``cfg`` object with ``specs.SimConfig()`` and the subsequent call to ``cfg.update()`` handle nested containers... + +.. code-block:: python + + from netpyne.batchtools import specs + cfg = specs.SimConfig({'foo': {'val0': 0, 'arr0': [0, 1, 2]}}) + assert cfg.foo['val0'] == 0 + assert cfg.foo['arr0'][0] == 0 + cfg.update({'foo': {'val0': 10, # update cfg.foo['val0'] to 10 + 'arr0': {0: 20, # update cfg.arr0[0] to 20 + 1: 30}}}) # update cfg.arr0[1] to 30 + assert cfg.foo['val0'] == 10 + assert cfg.foo['arr0'][0] == 20 + assert cfg.foo['arr0'][1] == 30 + assert cfg.foo['arr0'][2] == 2 # cfg.arr0[2] remains unchanged + +After updating the ``cfg`` object with the supplied dictionary, further updates will be made as appropriate by the calling ``batch`` process's search parameters... + +5. Communicating results to the ``dispatcher`` with the ``comm`` object +----------------------------------------------------------------------- + +Prior batched simulations relied on ``.pkl`` files to communicate data. The ``netpyne.batch`` subpackage uses a specific ``comm`` object to send custom data back. +The ``comm`` object determines the method of communication based on the batch job submission type.
+ +In terms of the simulation, the following functions are available to the user: + +* **comm.initialize()**: establishes a connection with the batch ``dispatcher`` for sending data + +* **comm.send()**: sends ``data`` to the batch ``dispatcher`` + + * for ``search`` jobs, it is important to match the data sent with the metric specified in the search function + +* **comm.close()**: closes and cleans up the connection with the batch ``dispatcher`` + +6. Specifying a batch job +------------------------- +Batch job handling is implemented with methods from ``netpyne.batchtools.search`` + +**search** + +.. code-block:: python + + def search(job_type: str, # the submission engine to run a single simulation (e.g. 'sge', 'sh') + comm_type: str, # the method of communication between host dispatcher and the simulation (e.g. 'socket', 'filesystem') + run_config: Dict, # batch configuration, (keyword: string pairs to customize the submit template) + params: Dict, # search space (dictionary of parameter keys: tune search spaces) + algorithm: Optional[str] = "variant_generator", # search algorithm to use, see SEARCH_ALG_IMPORT for available options + label: Optional[str] = 'search', # label for the search + output_path: Optional[str] = '../batch', # directory for storing generated files + checkpoint_path: Optional[str] = '../ray', # directory for storing checkpoint files + max_concurrent: Optional[int] = 1, # number of concurrent trials to run at one time + batch: Optional[bool] = True, # whether concurrent trials should run synchronously or asynchronously + num_samples: Optional[int] = 1, # number of trials to run + metric: Optional[str] = "loss", # metric to optimize (this should match some key: value pair in the returned data) + mode: Optional[str] = "min", # either 'min' or 'max' (whether to minimize or maximize the metric) + algorithm_config: Optional[dict] = None, # additional configuration for the search algorithm + ) -> tune.ResultGrid: # results of the search + +The basic
search implemented with the ``search`` function uses ``ray.tune`` as the search algorithm backend, returning a ``tune.ResultGrid`` which can be used to evaluate the search space and results. It takes the following parameters: + +* **job_type**: either "``sge``" or "``sh``", specifying how the job should be submitted, "``sge``" will submit batch jobs through the Sun Grid Engine. "``sh``" will submit batch jobs through the shell on a local machine +* **comm_type**: either "``socket``" or "``filesystem``", specifying how the job should communicate with the dispatcher +* **run_config**: a dictionary of keyword: string pairs to customize the submit template, the expected keyword: string pairs are dependent on the job_type:: + + ======= + sge + ======= + queue: the queue to submit the job to (#$ -q {queue}) + cores: the number of cores to request for the job (#$ -pe smp {cores}) + vmem: the amount of memory to request for the job (#$ -l h_vmem={vmem}) + realtime: the amount of time to request for the job (#$ -l h_rt={realtime}) + command: the command to run for the job + + example: + run_config = { + 'queue': 'cpu.q', # request job to be run on the 'cpu.q' queue + 'cores': 8, # request 8 cores for the job + 'vmem': '8G', # request 8GB of memory for the job + 'realtime': '24:00:00', # set timeout of the job to 24 hours + 'command': 'mpiexec -n $NSLOTS -hosts $(hostname) nrniv -python -mpi init.py' + } # set the command to be run to 'mpiexec -n $NSLOTS -hosts $(hostname) nrniv -python -mpi init.py' + + ======= + sh + ======= + command: the command to run for the job + + example: + run_config = { + 'command': 'mpiexec -n 8 nrniv -python -mpi init.py' + } # set the command to be run + +* **params**: a dictionary of config values to perform the search over. The keys of the dictionary should match the keys of the config object to be updated.
Lists or numpy generators >2 values will force a grid search over the values; otherwise, a list of two values will create a uniform distribution sample space. + + **usage 1**: updating a constant value specified in the ``SimConfig`` object + +.. code-block:: python + + # take a config object with the following parameter ``foo`` + cfg = specs.SimConfig() + cfg.foo = 0 + cfg.update() + + # specify a search space for ``foo`` such that a simulation will run with: + # cfg.foo = 0 + # cfg.foo = 1 + # cfg.foo = 2 + # ... + # cfg.foo = 9 + + # using: + params = { + 'foo': range(10) + } + + **usage 2**: updating a nested object in the ``SimConfig`` object + +.. code-block:: python + + # to update a nested object, the package uses the `.` operator to specify reflection into the object. + # take a config object with the following parameter object ``foo`` + cfg = specs.SimConfig() + cfg.foo = {'bar': 0, 'baz': 0} + cfg.update() + + # specify a search space for ``foo['bar']`` with `foo.bar` such that a simulation will run: + # cfg.foo['bar'] = 0 + # cfg.foo['bar'] = 1 + # cfg.foo['bar'] = 2 + # ... + # cfg.foo['bar'] = 9 + + # using: + params = { + 'foo.bar': range(10) + } + + # this reflection works with nested objects as well... + # i.e. + # cfg.foo = {'bar': {'baz': 0}} + # params = {'foo.bar.baz': range(10)} + + **usage 3**: updating a list object in the ``SimConfig`` object + +.. code-block:: python + + # to update a nested object, the package uses the `.` operator to specify reflection into the object. + # take a config object with the following + cfg = specs.SimConfig() + cfg.foo = [0, 1, 4, 9, 16] + cfg.update() + + # specify a search space for ``foo[0]`` with `foo.0` such that a simulation will run: + # cfg.foo[0] = 0 + # cfg.foo[0] = 1 + # cfg.foo[0] = 2 + # ... + # cfg.foo[0] = 9 + + # using: + params = { + 'foo.0': range(10) + } + + # this reflection works with nested objects as well... 
+ +* **algorithm** : the search algorithm (supported within ``ray.tune``) + + **Supported algorithms** + +.. code-block:: python + + * "variant_generator": grid and random based search of the parameter space (see: https://docs.ray.io/en/latest/tune/api/suggestion.html) + * "random": grid and random based search of the parameter space (see: https://docs.ray.io/en/latest/tune/api/suggestion.html) + * "axe": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html) + * "bayesopt": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html) + * "hyperopt": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html) + * "bohb": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html) + * "nevergrad": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html) + * "optuna": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html) + * "hebo": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html) + * "sigopt": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html) + * "zoopt": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html) + +* **label**: a label for the search, used for output file naming + +* **output_path**: the directory for storing generated files, can be a relative or absolute path + +* **checkpoint_path**: the directory for storing checkpoint files (maintained by ``ray.tune``)in case the search needs to be restored, can be a relative or absolute path + +* **max_concurrent**: the number of concurrent trials to run at one time, it is recommended to keep in mind the resource usage of each trial to avoid overscheduling + +* **batch**: whether concurrent trials should run synchronously or asynchronously + +* **num_samples**: the number of trials to run, for any grid search, each value in the grid will be sampled 
``num_samples`` times. + +* **metric**: the metric to optimize (this should match some key: value pair in the returned data) + +* **mode**: either 'min' or 'max' (whether to minimize or maximize the metric) + +* **algorithm_config**: additional configuration for the search algorithm (see the `optuna docs `_) + +7. Batch searches on the Rosenbrock function (some simple examples) +------------------------------------------------------------------- +The ``examples`` directory `on the NetPyNE github `_ contains multiple methods of performing automatic parameter search of a +2 dimensional Rosenbrock function. These examples are used to quickly demonstrate some of the functionality of batch communications rather than the full process of running parameter searches on a detailed +NEURON simulation (see 8. Performing parameter optimization searches (CA3 example)) and therefore only contain a `batch.py` file containing the script detailing the parameter space and search method, and a +`rosenbrock.py` file containing the function to explore, and the appropriate declarations and calls for batch automation and communication (rather than the traditional `cfg.py`, `netParams.py`, and `init.py` files). + +1. `basic_rosenbrock `_ + +This demonstrates a basic grid search of the Rosenbrock function using the new ``batchtools``, where the search space is defined as the cartesian product of ``params['x0']`` and ``params['x1']`` + +.. code-block:: python + + # from batch.py + params = {'x0': [0, 3], + 'x1': [0, 3], + } + +that is, with the values ``cfg.x0``, ``cfg.x1`` iterating over: ``[(0, 0), (0, 3), (3, 0), (3, 3)]`` list + +2. `coupled_rosenbrock `_ + +This demonstrates a basic paired grid search, where ``x0`` is ``[0, 1, 2]`` and x1[n] is ``x0[n]**2`` + +..
code-block:: python + + # from batch.py + x0 = numpy.arange(0, 3) + x1 = x0**2 + + x0_x1 = [*zip(x0, x1)] + params = {'x0_x1': x0_x1 + } + +the ``x0`` and ``x1`` values are paired together to create a search space ``x0_x1`` iterating over: ``[(0, 0), (1, 1), (2, 4)]`` list + +then, in the ``rosenbrock.py`` file, a list of two values ``cfg.x0_x1`` is created to capture the ``x0_x1`` values, which is then unpacked into individual ``x0`` and ``x1`` values + +.. code-block:: python + + # from rosenbrock.py + cfg.x0_x1 = [1, 1] + + cfg.update_cfg() + + # -------------- unpacking x0_x1 list -------------- # + x0, x1 = cfg.x0_x1 + +then the Rosenbrock function is evaluated with the unpacked ``x0`` and ``x1`` + +3. `random_rosenbrock `_ + +This demonstrates a grid search over a nested object, where ``xn`` is a list of 2 values which are independently modified to search the cartesian product of ``[0, 1, 2, 3, 4]`` and ``[0, 1, 2, 3, 4]`` + +.. code-block:: python + + # from batch.py + params = {'xn.0': numpy.arange(0, 5), + 'xn.1': numpy.arange(0, 5) + } + +By using ``xn.0`` and ``xn.1`` we can reference the 0th and 1st elements of the list, which is created and modified in rosenbrock.py + +.. code-block:: python + + # from rosenbrock.py + cfg.xn = [1, 1] + + cfg.update_cfg() + + # ---------------- unpacking x list ---------------- # + x0, x1 = cfg.xn + + +8. Performing parameter optimization searches (CA3 example) +----------------------------------------------------------- +The ``examples`` directory `on the NetPyNE github `_ shows both a ``grid`` based search as well as an ``optuna`` based optimization. + +In the ``CA3`` example, we tune the ``PYR->BC`` ``NMDA`` and ``AMPA`` synaptic weights, as well as the ``BC->PYR`` ``GABA`` synaptic weight. Note the search space is defined + +.. 
code-block:: python + + # from optuna_search.py + params = {'nmda.PYR->BC' : [1e-3, 1.8e-3], + 'ampa.PYR->BC' : [0.2e-3, 0.5e-3], + 'gaba.BC->PYR' : [0.4e-3, 1.0e-3], + } + +in both ``optuna_search.py``, defining the upper and lower bounds of the search space, while in ``grid_search.py`` the search space is defined + +.. code-block:: python + + # from grid_search.py + params = {'nmda.PYR->BC' : numpy.linspace(1e-3, 1.8e-3, 3), + 'ampa.PYR->BC' : numpy.linspace(0.2e-3, 0.5e-3, 3), + 'gaba.BC->PYR' : numpy.linspace(0.4e-3, 1.0e-3, 3), + } + +which defines ``3x3x3`` specific values to search over + +Note that the ``metric`` specifies a specific ``string`` (``loss``) to report and optimize around. This value is generated and ``sent`` by the ``init.py`` simulation + +.. code-block:: python + + # from init.py + results['PYR_loss'] = (results['PYR'] - 3.33875)**2 + results['BC_loss'] = (results['BC'] - 19.725 )**2 + results['OLM_loss'] = (results['OLM'] - 3.470 )**2 + results['loss'] = (results['PYR_loss'] + results['BC_loss'] + results['OLM_loss']) / 3 + out_json = json.dumps({**inputs, **results}) + + print(out_json) + #TODO put all of this in a single function. + comm.send(out_json) + comm.close() + +The ``out_json`` output contains a dictionary which includes the ``loss`` metric (calculated as the MSE between observed and expected values) + +In a multi-objective optimization, the relevant ``PYR_loss``, ``BC_loss``, and ``OLM_loss`` components are additionally included (see ``mo_optuna_search.py``) + +9. Multiprocessing and parallelization +-------------------------------------- +When using ``mpiexec`` to run simulations, it is important to only have one thread handle communications with the ``dispatcher`` host. To do this, encapsulate calls to ``comm.send()`` and ``comm.close()`` within +a conditional block which checks that the proper thread (set as the 0th thread) executes communication calls as follows... + +.. 
code-block:: python + + out_json = json.dumps({**inputs, **results}) # prepare message to send to host... + if comm.is_host(): # only single thread enters this execution block... + comm.send(out_json) + comm.close() + +10. Parameter Importance Evaluation Using fANOVA +----------------------------------------------- +A new feature in the batchtools beta release is the ability to evaluate parameter importance using a functional ANOVA inspired algorithm via the ``Optuna`` and ``scikit-learn`` libraries. +(See `the original Hutter paper `_ and its `citation `_) + +Currently, only unpaired single parameter importance to a single metric score is supported through the ``NetPyNE.batchtools.analysis`` ``Analyzer`` object, with an example of its usage +`here `_: + +To run the example, generate an output ``grid.csv`` using ``batch.py``, then load that ``grid.csv`` into the ``Analyzer`` object. Then, using ``run_analysis`` will generate, per parameter, a single score indicative of the estimated ``importance`` of the parameter: that is, the estimated effect on the total variance of the model within the given bounds. + +.. code-block:: python + + # from analysis.py + from netpyne.batchtools.analysis import Analyzer + + analyzer = Analyzer(params = ['x.0', 'x.1', 'x.2', 'x.3'], metrics = ['fx']) # specify the parameter space and metrics of the batch function + analyzer.load_file('grid.csv') # load the grid file generated by the batch run + results = analyzer.run_analysis() # run fANOVA analysis and store the importance values in a results dictionary + diff --git a/examples/HHTut/src/HHTut.py b/examples/HHTut/src/HHTut.py index 9ff40e99a..2d06c28e0 100644 --- a/examples/HHTut/src/HHTut.py +++ b/examples/HHTut/src/HHTut.py @@ -70,7 +70,6 @@ # Saving simConfig.filename = 'HHTut' # Set file output name -simConfig.saveFileStep = 1000 # step size in ms to save data to disk simConfig.savePickle = False # Whether or not to write spikes etc.
to a .mat file simConfig.saveJson = True diff --git a/examples/HybridTut/src/cfg.py b/examples/HybridTut/src/cfg.py index 3000a52a0..ab178d5c2 100644 --- a/examples/HybridTut/src/cfg.py +++ b/examples/HybridTut/src/cfg.py @@ -23,7 +23,6 @@ # Saving cfg.filename = 'mpiHybridTut' # Set file output name -cfg.saveFileStep = 1000 # step size in ms to save data to disk cfg.savePickle = False # Whether or not to write spikes etc. to a .mat file cfg.saveJson = False # Whether or not to write spikes etc. to a .mat file cfg.saveMat = False # Whether or not to write spikes etc. to a .mat file diff --git a/examples/M1/src/cfg.py b/examples/M1/src/cfg.py index 89238f443..5c61b8684 100644 --- a/examples/M1/src/cfg.py +++ b/examples/M1/src/cfg.py @@ -34,7 +34,6 @@ # Saving cfg.filename = 'data/M1_ynorm_izhi' # Set file output name -cfg.saveFileStep = 1000 # step size in ms to save data to disk cfg.savePickle = False # save to pickle file cfg.saveJson = False # save to json file cfg.saveMat = False # save to mat file diff --git a/examples/RL_arm/params.py b/examples/RL_arm/params.py index a863d05ae..cf56ffe75 100644 --- a/examples/RL_arm/params.py +++ b/examples/RL_arm/params.py @@ -308,7 +308,6 @@ # Saving simConfig.filename = 'simdata' # Set file output name -simConfig.saveFileStep = 1000 # step size in ms to save data to disk simConfig.savePickle = True # Whether or not to write spikes etc. to a .mat file simConfig.saveJson = False # Whether or not to write spikes etc. to a .mat file simConfig.saveMat = False # Whether or not to write spikes etc. 
to a .mat file diff --git a/examples/netClamp/src/cfg.py b/examples/netClamp/src/cfg.py index 88d0c68db..dfac67e86 100644 --- a/examples/netClamp/src/cfg.py +++ b/examples/netClamp/src/cfg.py @@ -1,4 +1,3 @@ -import imp from netpyne import specs # Simulation options @@ -14,4 +13,4 @@ cfg.recordStim = True # required for netClamp to work cfg.analysis['plotRaster'] = {'orderBy': 'y', 'orderInverse': True} # Plot a raster -cfg.analysis['plotTraces'] = {'include': [5]} # Plot recorded traces for this list of cells \ No newline at end of file +cfg.analysis['plotTraces'] = {'include': [5]} # Plot recorded traces for this list of cells diff --git a/examples/rxd_buffering/src/netParams.py b/examples/rxd_buffering/src/netParams.py index 06a2014b2..22190e495 100644 --- a/examples/rxd_buffering/src/netParams.py +++ b/examples/rxd_buffering/src/netParams.py @@ -1,4 +1,3 @@ -import imp from netpyne import specs netParams = specs.NetParams() # object of class NetParams to store the network parameters @@ -27,4 +26,4 @@ netParams.rxdParams['reactions'] = {'buffering': {'reactant': '2 * ca + buf', 'product': 'cabuf', 'rate_f': kf, 'rate_b': kb}} ### rates -netParams.rxdParams['rates'] = {'degradation': {'species': 'buf', 'rate': '-1e-3 * buf'}} \ No newline at end of file +netParams.rxdParams['rates'] = {'degradation': {'species': 'buf', 'rate': '-1e-3 * buf'}} diff --git a/netpyne/__init__.py b/netpyne/__init__.py index 0dae0c25b..182303abb 100644 --- a/netpyne/__init__.py +++ b/netpyne/__init__.py @@ -4,7 +4,7 @@ NetPyNE consists of a number of sub-packages and modules. 
""" -__version__ = '1.0.6' +__version__ = '1.0.7' import os, sys display = os.getenv('DISPLAY') diff --git a/netpyne/analysis/csd.py b/netpyne/analysis/csd.py index ba61f7100..25cbb0ffb 100644 --- a/netpyne/analysis/csd.py +++ b/netpyne/analysis/csd.py @@ -3,15 +3,6 @@ """ -from __future__ import print_function -from __future__ import division -from __future__ import unicode_literals -from __future__ import absolute_import - -from future import standard_library - -standard_library.install_aliases() - try: basestring except NameError: diff --git a/netpyne/analysis/dipole.py b/netpyne/analysis/dipole.py index 165467d4b..46c9da25c 100644 --- a/netpyne/analysis/dipole.py +++ b/netpyne/analysis/dipole.py @@ -3,24 +3,13 @@ """ -from __future__ import print_function -from __future__ import division -from __future__ import unicode_literals -from __future__ import absolute_import - import os -from builtins import range -from builtins import round -from builtins import str try: basestring except NameError: basestring = str -from future import standard_library - -standard_library.install_aliases() from netpyne import __gui__ if __gui__: diff --git a/netpyne/analysis/filter.py b/netpyne/analysis/filter.py index 6e907cd85..2f812fc00 100644 --- a/netpyne/analysis/filter.py +++ b/netpyne/analysis/filter.py @@ -21,11 +21,6 @@ """ -from __future__ import print_function -from __future__ import division -from __future__ import unicode_literals -from __future__ import absolute_import - import warnings import numpy as np diff --git a/netpyne/analysis/info.py b/netpyne/analysis/info.py index 76ef99d29..2871dc920 100644 --- a/netpyne/analysis/info.py +++ b/netpyne/analysis/info.py @@ -3,20 +3,11 @@ """ -from __future__ import print_function -from __future__ import division -from __future__ import unicode_literals -from __future__ import absolute_import - try: basestring except NameError: basestring = str -from builtins import zip -from future import standard_library - 
-standard_library.install_aliases() from netpyne import __gui__ if __gui__: diff --git a/netpyne/analysis/interactive.py b/netpyne/analysis/interactive.py index cf2b478b8..3079d3cf1 100644 --- a/netpyne/analysis/interactive.py +++ b/netpyne/analysis/interactive.py @@ -3,14 +3,6 @@ """ -from __future__ import unicode_literals -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from future import standard_library - -standard_library.install_aliases() from netpyne import __gui__ try: diff --git a/netpyne/analysis/lfp.py b/netpyne/analysis/lfp.py index 56dd2b7e6..8f2c43fe4 100644 --- a/netpyne/analysis/lfp.py +++ b/netpyne/analysis/lfp.py @@ -3,23 +3,11 @@ """ -from __future__ import print_function -from __future__ import division -from __future__ import unicode_literals -from __future__ import absolute_import - -from builtins import range -from builtins import round -from builtins import str - try: basestring except NameError: basestring = str -from future import standard_library - -standard_library.install_aliases() from netpyne import __gui__ if __gui__: diff --git a/netpyne/analysis/lfp_orig.py b/netpyne/analysis/lfp_orig.py index e68ab66cd..d124ebc25 100644 --- a/netpyne/analysis/lfp_orig.py +++ b/netpyne/analysis/lfp_orig.py @@ -3,23 +3,11 @@ """ -from __future__ import print_function -from __future__ import division -from __future__ import unicode_literals -from __future__ import absolute_import - -from builtins import range -from builtins import round -from builtins import str - try: basestring except NameError: basestring = str -from future import standard_library - -standard_library.install_aliases() from netpyne import __gui__ if __gui__: diff --git a/netpyne/analysis/network.py b/netpyne/analysis/network.py index 068451b7f..8426feafe 100644 --- a/netpyne/analysis/network.py +++ b/netpyne/analysis/network.py @@ -3,26 +3,11 @@ """ -from __future__ import print_function -from __future__ import 
division -from __future__ import unicode_literals -from __future__ import absolute_import - -from builtins import open -from builtins import next -from builtins import range -from builtins import str - try: basestring except NameError: basestring = str -from builtins import zip - -from builtins import round -from future import standard_library -standard_library.install_aliases() from netpyne import __gui__ if __gui__: diff --git a/netpyne/analysis/rxd.py b/netpyne/analysis/rxd.py index 3678d6171..75e0fcfbf 100644 --- a/netpyne/analysis/rxd.py +++ b/netpyne/analysis/rxd.py @@ -3,19 +3,11 @@ """ -from __future__ import unicode_literals -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - try: basestring except NameError: basestring = str -from future import standard_library - -standard_library.install_aliases() from netpyne import __gui__ if __gui__: diff --git a/netpyne/analysis/spikes.py b/netpyne/analysis/spikes.py index 98fc65297..2d42e241a 100644 --- a/netpyne/analysis/spikes.py +++ b/netpyne/analysis/spikes.py @@ -3,19 +3,6 @@ """ -from __future__ import unicode_literals -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from future import standard_library - -standard_library.install_aliases() - -from builtins import round -from builtins import open -from builtins import range - try: to_unicode = unicode except NameError: @@ -83,7 +70,7 @@ def prepareSpikeData( orderBy = 'gid' elif orderBy == 'pop': df['popInd'] = df['pop'].astype('category') - df['popInd'].cat.set_categories(sim.net.pops.keys(), inplace=True) + df['popInd'] = df['popInd'].cat.set_categories(sim.net.pops.keys()) orderBy = 'popInd' elif isinstance(orderBy, basestring) and not isinstance(cells[0]['tags'][orderBy], Number): orderBy = 'gid' @@ -91,7 +78,7 @@ def prepareSpikeData( if isinstance(orderBy, list): if 'pop' in orderBy: df['popInd'] = df['pop'].astype('category') - 
df['popInd'].cat.set_categories(sim.net.pops.keys(), inplace=True) + df['popInd'] = df['popInd'].cat.set_categories(sim.net.pops.keys()) orderBy[orderBy.index('pop')] = 'popInd' keep = keep + list(set(orderBy) - set(keep)) elif orderBy not in keep: diff --git a/netpyne/analysis/spikes_legacy.py b/netpyne/analysis/spikes_legacy.py index 66e92bc2f..e35c2c0e8 100755 --- a/netpyne/analysis/spikes_legacy.py +++ b/netpyne/analysis/spikes_legacy.py @@ -3,25 +3,11 @@ """ -from __future__ import print_function -from __future__ import division -from __future__ import unicode_literals -from __future__ import absolute_import - -from builtins import dict -from builtins import round -from builtins import str - try: basestring except NameError: basestring = str -from builtins import range -from builtins import zip - -from future import standard_library -standard_library.install_aliases() from netpyne import __gui__ if __gui__: diff --git a/netpyne/analysis/tools.py b/netpyne/analysis/tools.py index 9d68db49f..0a11e507e 100644 --- a/netpyne/analysis/tools.py +++ b/netpyne/analysis/tools.py @@ -3,19 +3,6 @@ """ -from __future__ import unicode_literals -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from future import standard_library - -standard_library.install_aliases() - -from builtins import round -from builtins import open -from builtins import range - try: to_unicode = unicode except NameError: diff --git a/netpyne/analysis/traces.py b/netpyne/analysis/traces.py index 517428287..0c4b9f38f 100644 --- a/netpyne/analysis/traces.py +++ b/netpyne/analysis/traces.py @@ -3,21 +3,11 @@ """ -from __future__ import print_function -from __future__ import division -from __future__ import unicode_literals -from __future__ import absolute_import - -from builtins import range -from builtins import str - try: basestring except NameError: basestring = str -from future import standard_library -standard_library.install_aliases() 
from netpyne import __gui__ if __gui__: diff --git a/netpyne/analysis/utils.py b/netpyne/analysis/utils.py index 2b49515f0..bfc666d27 100644 --- a/netpyne/analysis/utils.py +++ b/netpyne/analysis/utils.py @@ -3,15 +3,6 @@ """ -from __future__ import print_function -from __future__ import division -from __future__ import unicode_literals -from __future__ import absolute_import - -from builtins import round -from builtins import open -from builtins import range - # required to make json saving work in Python 2/3 try: to_unicode = unicode @@ -22,9 +13,6 @@ except NameError: basestring = str -from future import standard_library - -standard_library.install_aliases() from netpyne import __gui__ if __gui__: diff --git a/netpyne/analysis/wrapper.py b/netpyne/analysis/wrapper.py index 701b0ffd2..93626d4cf 100644 --- a/netpyne/analysis/wrapper.py +++ b/netpyne/analysis/wrapper.py @@ -3,14 +3,6 @@ """ -from __future__ import print_function -from __future__ import unicode_literals -from __future__ import division -from __future__ import absolute_import - -from future import standard_library - -standard_library.install_aliases() from netpyne import __gui__ try: diff --git a/netpyne/batch/__init__.py b/netpyne/batch/__init__.py index 74580c1ae..62da2bb9b 100644 --- a/netpyne/batch/__init__.py +++ b/netpyne/batch/__init__.py @@ -3,11 +3,4 @@ """ -from __future__ import unicode_literals -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import -from future import standard_library - -standard_library.install_aliases() from .batch import Batch diff --git a/netpyne/batch/asd_parallel.py b/netpyne/batch/asd_parallel.py index 8e7ccdbdd..45ef1e9a1 100644 --- a/netpyne/batch/asd_parallel.py +++ b/netpyne/batch/asd_parallel.py @@ -3,20 +3,6 @@ """ -from __future__ import print_function -from __future__ import unicode_literals -from __future__ import division -from __future__ import absolute_import - -from builtins import zip - -from 
builtins import range -from builtins import open -from builtins import str -from future import standard_library - -standard_library.install_aliases() - # required to make json saving work in Python 2/3 try: to_unicode = unicode diff --git a/netpyne/batch/batch.py b/netpyne/batch/batch.py index a3eac6b41..6c24a9c9f 100644 --- a/netpyne/batch/batch.py +++ b/netpyne/batch/batch.py @@ -3,20 +3,6 @@ """ -from __future__ import print_function -from __future__ import unicode_literals -from __future__ import division -from __future__ import absolute_import - -from builtins import zip - -from builtins import range -from builtins import open -from builtins import str -from future import standard_library - -standard_library.install_aliases() - # required to make json saving work in Python 2/3 try: to_unicode = unicode @@ -76,37 +62,40 @@ def tupleToStr(obj): class Batch(object): """ Class that handles batch simulations on NetPyNE. - Relevant Attributes: - batchLabel : str - The label of the batch used for directory/file naming of batch generated files. - cfgFile : str - The path of the file containing the `netpyne.simConfig.SimConfig` object - cfg : `netpyne.simConfig.SimConfig` - The `netpyne.simConfig.SimConfig` object - N.B. either cfg or cfgFile should be specified #TODO: replace with typechecked single argument - netParamsFile : str - The path of the file containing the `netpyne.netParams.NetParams` object - netParams : `netpyne.netParams.NetParams` - The `netpyne.netParams.NetParams` object - N.B. 
either netParams or netParamsFile should be specified #TODO: replace with typechecked single argument - initCfg : dict - params dictionary that is used to modify the batch cfg prior to any algorithm based parameter modifications - saveFolder : str - The path of the folder where the batch will be saved (defaults to batchLabel) - method : str - The algorithm method used for batch - runCfg : dict - Keyword: Arg dictionary used to generate submission templates (see utils.py) - evolCfg : dict #TODO: replace with algoCfg? to merge with optimCfg - Keyword: Arg dictionary used to define evolutionary algorithm parameters (see evol.py) - optimCfg : dict #TODO: replace with algoCfg? to merge with evolCfg - Keyword: Arg dictionary used to define optimization algorithm parameters - (see asd_parallel.py, optuna_parallel.py, sbi_parallel.py) - params : list - Dictionary of parameters to be explored per algorithm (grid, evol, asd, optuna, sbi) - (see relevant algorithm script for details) - seed : int - Seed for random number generator for some algorithms + + Attributes + ---------- + + batchLabel : str + The label of the batch used for directory/file naming of batch generated files. + cfgFile : str + The path of the file containing the `netpyne.simConfig.SimConfig` object + cfg : `netpyne.simConfig.SimConfig` + The `netpyne.simConfig.SimConfig` object + N.B. either cfg or cfgFile should be specified #TODO: replace with typechecked single argument + netParamsFile : str + The path of the file containing the `netpyne.netParams.NetParams` object + netParams : `netpyne.netParams.NetParams` + The `netpyne.netParams.NetParams` object + N.B. 
either netParams or netParamsFile should be specified #TODO: replace with typechecked single argument + initCfg : dict + params dictionary that is used to modify the batch cfg prior to any algorithm based parameter modifications + saveFolder : str + The path of the folder where the batch will be saved (defaults to batchLabel) + method : str + The algorithm method used for batch + runCfg : dict + Keyword: Arg dictionary used to generate submission templates (see utils.py) + evolCfg : dict #TODO: replace with algoCfg? to merge with optimCfg + Keyword: Arg dictionary used to define evolutionary algorithm parameters (see evol.py) + optimCfg : dict #TODO: replace with algoCfg? to merge with evolCfg + Keyword: Arg dictionary used to define optimization algorithm parameters + (see asd_parallel.py, optuna_parallel.py, sbi_parallel.py) + params : list + Dictionary of parameters to be explored per algorithm (grid, evol, asd, optuna, sbi) + (see relevant algorithm script for details) + seed : int + Seed for random number generator for some algorithms """ def __init__( diff --git a/netpyne/batch/evol.py b/netpyne/batch/evol.py index e74521fce..f9622d246 100644 --- a/netpyne/batch/evol.py +++ b/netpyne/batch/evol.py @@ -3,20 +3,7 @@ """ -from __future__ import print_function -from __future__ import unicode_literals -from __future__ import division -from __future__ import absolute_import - -from builtins import zip - -from builtins import range -from builtins import open -from builtins import str from ctypes import util -from future import standard_library - -standard_library.install_aliases() # required to make json saving work in Python 2/3 try: diff --git a/netpyne/batch/grid.py b/netpyne/batch/grid.py index adec7a77f..dee221e96 100644 --- a/netpyne/batch/grid.py +++ b/netpyne/batch/grid.py @@ -3,20 +3,6 @@ """ -from __future__ import print_function -from __future__ import unicode_literals -from __future__ import division -from __future__ import absolute_import - -from 
builtins import zip - -from builtins import range -from builtins import open -from builtins import str -from future import standard_library - -standard_library.install_aliases() - # required to make json saving work in Python 2/3 try: to_unicode = unicode @@ -24,7 +10,6 @@ to_unicode = str import pandas as pd -import imp import os, sys import glob from time import sleep diff --git a/netpyne/batch/optuna_parallel.py b/netpyne/batch/optuna_parallel.py index a0abee973..7af3793d2 100644 --- a/netpyne/batch/optuna_parallel.py +++ b/netpyne/batch/optuna_parallel.py @@ -3,20 +3,6 @@ """ -from __future__ import print_function -from __future__ import unicode_literals -from __future__ import division -from __future__ import absolute_import - -from builtins import zip - -from builtins import range -from builtins import open -from builtins import str -from future import standard_library - -standard_library.install_aliases() - # required to make json saving work in Python 2/3 try: to_unicode = unicode diff --git a/netpyne/batch/sbi_parallel.py b/netpyne/batch/sbi_parallel.py index 5700548da..9ef44cb10 100644 --- a/netpyne/batch/sbi_parallel.py +++ b/netpyne/batch/sbi_parallel.py @@ -2,20 +2,8 @@ Module for SBI optimization """ -from __future__ import print_function -from __future__ import unicode_literals -from __future__ import division -from __future__ import absolute_import - -from builtins import zip - -from builtins import range -from builtins import open -from builtins import str from lib2to3.pytree import NegatedPattern -from future import standard_library -standard_library.install_aliases() # required to make json saving work in Python 2/3 try: @@ -32,16 +20,11 @@ from neuron import h from sbi import utils as utils from sbi import analysis as analysis -from sbi.inference.base import infer -from sbi.analysis.plot import pairplot from sbi import utils from sbi import analysis -from sbi import inference -from sbi.inference import SNPE, SNLE, SNRE, simulate_for_sbi, 
prepare_for_sbi +from sbi.inference import SNPE, SNLE, SNRE, simulate_for_sbi +from sbi.inference.trainers.base import process_simulator, process_prior import torch -import torch.nn as nn -import torch.nn.functional as F -from scipy.stats import kurtosis pc = h.ParallelContext() # use bulletin board master/slave @@ -200,7 +183,11 @@ def objectiveFunc(param): updateBestFit(candidate, fitness) return torch.as_tensor(fitness + sum_statistics) - simulator, prior = prepare_for_sbi(lambda param: objectiveFunc(param), prior) + # Check prior, return PyTorch prior. + prior, _, prior_returns_numpy = process_prior(prior) + # Check simulator, returns PyTorch simulator able to simulate batches. + simulator = process_simulator(simulator, prior, prior_returns_numpy) + inference = sbi_md[sbi_method](prior=prior) if inference_type == 'single': diff --git a/netpyne/batch/utils.py b/netpyne/batch/utils.py index 3ddfdc514..99bdbe759 100644 --- a/netpyne/batch/utils.py +++ b/netpyne/batch/utils.py @@ -3,16 +3,8 @@ """ -from __future__ import unicode_literals -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import import builtins -from future import standard_library - -standard_library.install_aliases() - import numpy as np import json import pickle @@ -36,11 +28,13 @@ def createFolder(folder): import os - if not os.path.exists(folder): - try: - os.mkdir(folder) - except OSError: - print(' Could not create %s' % (folder)) + # If file path does not exist, it will create the file path (parent and sub-directories) + + try: + os.makedirs(folder, exist_ok=True) + except Exception as e: + print('%s: Exception: %s,' % (os.path.abspath(__file__), e)) + raise SystemExit('Could not create %s' % (folder)) # ------------------------------------------------------------------------------- diff --git a/netpyne/batchtools/__init__.py b/netpyne/batchtools/__init__.py new file mode 100644 index 000000000..5a373d286 --- /dev/null +++ 
b/netpyne/batchtools/__init__.py @@ -0,0 +1,27 @@ +from netpyne.batchtools.runners import NetpyneRunner +from batchtk.runtk import dispatchers +from netpyne.batchtools import submits +from batchtk import runtk +from netpyne.batchtools.analysis import Analyzer + +specs = NetpyneRunner() + +from netpyne.batchtools.comm import Comm + +comm = Comm() + +dispatchers = dispatchers +submits = submits +runtk = runtk + + +""" +def analyze_from_file(filename): + analyzer = Fanova() + analyzer.load_file(filename) + analyzer.run_analysis( +""" + +#from ray import tune as space.comm +#list and lb ub + diff --git a/netpyne/batchtools/analysis.py b/netpyne/batchtools/analysis.py new file mode 100644 index 000000000..561e339ab --- /dev/null +++ b/netpyne/batchtools/analysis.py @@ -0,0 +1,55 @@ +import pandas +from collections import namedtuple +import numpy + +from optuna.importance._fanova._fanova import _Fanova + + +class Fanova(object): + def __init__(self, n_trees: int = 64, max_depth: int = 64, seed: int | None = None) -> None: + self._evaluator = _Fanova( + n_trees=n_trees, + max_depth=max_depth, + min_samples_split=2, + min_samples_leaf=1, + seed=seed, + ) + + def evaluate(self, X: pandas.DataFrame, y: pandas.DataFrame) -> dict: + assert X.shape[0] == y.shape[0] # all rows must be present + assert y.shape[1] == 1 # only evaluation for single metric supported + + evaluator = self._evaluator + #mins, maxs = X.min().values, X.max().values #in case bound matching is necessary. 
+ search_spaces = numpy.array([X.min().values, X.max().values]).T # bounds + column_to_encoded_columns = [numpy.atleast_1d(i) for i in range(X.shape[1])] # encoding (no 1 hot/categorical) + evaluator.fit(X.values, y.values.ravel(), search_spaces, column_to_encoded_columns) + importances = numpy.array( + [evaluator.get_importance(i)[0] for i in range(X.shape[1])] + ) + return {col: imp for col, imp in zip(X.columns, importances)} + + +class Analyzer(object): + def __init__(self, + params: list, # list of parameters + metrics: list, # list of metrics + evaluator = Fanova()) -> None: + self.params = params + self.metrics = metrics + self.data = None + self.evaluator = evaluator + + def load_file(self, + filename: str # filename (.csv) containing the completed batchtools trials + ) -> None: + data = pandas.read_csv(filename) + param_space = data[["config/{}".format(param) for param in self.params]] + param_space = param_space.rename(columns={'config/{}'.format(param): param for param in self.params}) + results = data[self.metrics] + self.data = namedtuple('data', ['param_space', 'results'])(param_space, results) + + def run_analysis(self) -> dict: + return self.evaluator.evaluate(self.data.param_space, self.data.results) + + diff --git a/netpyne/batchtools/comm.py b/netpyne/batchtools/comm.py new file mode 100644 index 000000000..1a8a12763 --- /dev/null +++ b/netpyne/batchtools/comm.py @@ -0,0 +1,48 @@ +from netpyne.batchtools import specs +from batchtk.runtk.runners import get_class +from batchtk import runtk +from neuron import h +import warnings +HOST = 0 # for the purposes of send and receive with mpi. 
+ +class Comm(object): + def __init__(self, runner = specs): + self.runner = runner + h.nrnmpi_init() + self.pc = h.ParallelContext() + self.rank = self.pc.id() + self.connected = False + + def initialize(self): + if self.is_host(): + try: + self.runner.connect() + self.connected = True + except Exception as e: + print("Failed to connect to the Dispatch Server, failover to Local mode. See: {}".format(e)) + self.runner._set_inheritance('file') #TODO or could change the inheritance of the runner ... + self.runner.env[runtk.MSGOUT] = "{}/{}.out".format(self.runner.cfg.saveFolder, self.runner.cfg.simLabel) + + def set_runner(self, runner_type): + self.runner = get_class(runner_type)() + def is_host(self): + return self.rank == HOST + def send(self, data): + if self.is_host(): + if self.connected: + self.runner.send(data) + else: + self.runner.write(data) + + def recv(self): #TODO to be tested, broadcast to all workers? + if self.is_host() and self.connected: + data = self.runner.recv() + else: + data = None + #data = self.is_host() and self.runner.recv() + #probably don't put a blocking statement in a boolean evaluation... 
+ self.pc.barrier() + return self.pc.py_broadcast(data, HOST) + + def close(self): + self.runner.close() diff --git a/netpyne/batchtools/docs/batchtools.ipynb b/netpyne/batchtools/docs/batchtools.ipynb new file mode 100644 index 000000000..22fe3489f --- /dev/null +++ b/netpyne/batchtools/docs/batchtools.ipynb @@ -0,0 +1,314 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "source": [ + "Jupyter Tutorial: The NetPyNE batchtools subpackage\n", + "How to use the `specs` and `comm` to communicate with the `batchtools` `dispatcher`\n" + ], + "metadata": { + "collapsed": false + }, + "id": "89ec6ca2392a9a0d" + }, + { + "cell_type": "markdown", + "source": [ + "For each individual `sim`, communication with the `batchtools` `dispatcher` occurs through the `specs` and `comm` objects" + ], + "metadata": { + "collapsed": false + }, + "id": "be50f40d8e61a944" + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "from netpyne.batchtools import specs, comm" + ], + "metadata": { + "collapsed": false + }, + "id": "6f321aedb7faf945", + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "the `specs` object is an instantiation of a custom class extending the `batchtk` `Runner` ..." + ], + "metadata": { + "collapsed": false + }, + "id": "5f2f08f0b5e582c3" + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "help(type(specs))" + ], + "metadata": { + "collapsed": false + }, + "id": "29fa261236494bc3", + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "From this `specs` object, we can similarly call `specs.NetParams` and `specs.SimConfig` to create the NetPyNE objects..." 
+ ], + "metadata": { + "collapsed": false + }, + "id": "64ead24451bbad4a" + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "help(specs.NetParams)\n", + "help(specs.SimConfig)" + ], + "metadata": { + "collapsed": false + }, + "id": "43d263d080800019", + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "The `batchtools` job submission tool uses `environmental variables` to pass values to our `config` object created by `specs.SimConfig`, these `environmental variables` are captured during the `specs` `object creation` which occurs during the batchtools `import` (from the `batchtools` `__init__.py`:\n", + "```\n", + "from netpyne.batchtools.runners import NetpyneRunner\n", + "specs = NetpyneRunner()\n", + "```" + ], + "metadata": { + "collapsed": false + }, + "id": "710cc6084bd7af02" + }, + { + "cell_type": "markdown", + "source": [ + "Let's `export` some `environmental variables` to pass values to our `config` object. When this is handled by the `batchtools` `subpackage`, this occurs automatically..." 
+ ], + "metadata": { + "collapsed": false + }, + "id": "52704684f5e80f3c" + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "%env STRRUNTK0 =foo.bar=baz\n", + "%env FLOATRUNTK1 =float_val=7.7\n", + "from netpyne.batchtools import NetpyneRunner\n", + "specs = NetpyneRunner()" + ], + "metadata": { + "collapsed": false + }, + "id": "50de117ff7f43aa6", + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "One way of retrieving these values is by calling `specs.get_mappings()`" + ], + "metadata": { + "collapsed": false + }, + "id": "fac14e517044b980" + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "print(specs.get_mappings())" + ], + "metadata": { + "collapsed": false + }, + "id": "257fad390f4abce", + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Now, let's create our `config` object using the `specs.SimConfig()` constructor\n", + "This `config` object will hold a `dictionary` such that the initial values `foo['bar']` = `not_baz` and a `float_val` = `3.3`" + ], + "metadata": { + "collapsed": false + }, + "id": "92d41061bb828744" + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "cfg = specs.SimConfig()\n", + "cfg.foo = {'bar': 'not_baz', 'qux': 'quux'}\n", + "cfg.float_val = 3.3\n", + "print(\"cfg.foo['bar'] = {}\".format(cfg.foo['bar']))\n", + "print(\"cfg.float_val = {}\".format(cfg.float_val))" + ], + "metadata": { + "collapsed": false + }, + "id": "ca121d6ab30c3e7b", + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Finally, calling the `cfg.update_cfg()` method will overwrite the original values with our environment values, (`baz` and `7.7`)...\n", + "\n", + "in NetPyNE, this was originally handled with the:\n", + "```\n", + "try:\n", + " from __main__ import cfg\n", + "except:\n", + " from cfg import cfg\n", + "```\n", + "API idiom in the `netParams.py` file...\n", + " \n", + "as well as the \n", + "```\n", + "cfg, netParams = 
sim.readCmdLineArgs(simConfigDefault='src/cfg.py', netParamsDefault='src/netParams.py')\n", + "```\n", + "API idiom in the `init.py` file...\n", + "\n", + "using the `batchtools` subpackage, we can treat the `cfg` as an object and pass it between scripts via `import` statements...\n", + "in `netParams.py`...\n", + "```\n", + "from cfg import cfg\n", + "cfg.update()\n", + "```\n", + "in `init.py`...\n", + "```\n", + "from netParams import cfg, netParams\n", + "sim.createSimulateAnalyze(simConfig=cfg, netParams=netParams)\n", + "```" + ], + "metadata": { + "collapsed": false + }, + "id": "6ea43f729d0685d4" + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "print(\"prior to cfg.update()\")\n", + "print(\"cfg.foo['bar'] = {}\".format(cfg.foo['bar']))\n", + "print(\"cfg.float_val = {}\".format(cfg.float_val))\n", + "print()\n", + "cfg.update() # call update_cfg to update values in the cfg object with values assigned by batch\n", + "print(\"after the cfg.update()\")\n", + "print(\"cfg.foo['bar'] = {}\".format(cfg.foo['bar']))\n", + "print(\"cfg.float_val = {}\".format(cfg.float_val))" + ], + "metadata": { + "collapsed": false + }, + "id": "a9426b6e6594961", + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Finally, the `comm object` is used to report to the monitoring `dispatcher object`\n", + "the means of communication is dependent on which `dispatcher object` is instantiated, and communicated through environmental variables\n", + "in this case, since there is no `dispatcher object` the `comm` methods will simply perform `pass operations`" + ], + "metadata": { + "collapsed": false + }, + "id": "65bbb0ef2c76295a" + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "comm.initialize() # initializes comm object, establishing channel to communicate with the host dispatcher object" + ], + "metadata": { + "collapsed": false + }, + "id": "e9141d91d6e02aa3", + "execution_count": null + }, + { + "cell_type": "code", + 
"outputs": [], + "source": [ + "print(comm.is_host()) # returns a boolean IF the calling process is the 0th ranked parallelcontext, similar to sim.pc.rank == 0" + ], + "metadata": { + "collapsed": false + }, + "id": "5ed6a524bd8a3e0b", + "execution_count": null + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "comm.send('message') # sends 'message' to the `dispatcher object`" + ], + "metadata": { + "collapsed": false + }, + "id": "1966edbf32649352", + "execution_count": null + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "comm.close() #finalizes communication, closes any resources used to communicate with the `dispatcher object`" + ], + "metadata": { + "collapsed": false + }, + "id": "34f021af4127363c" + }, + { + "cell_type": "markdown", + "source": [], + "metadata": { + "collapsed": false + }, + "id": "648746fff96b8a72" + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/netpyne/batchtools/docs/batchtools.rst b/netpyne/batchtools/docs/batchtools.rst new file mode 100644 index 000000000..c21bf4da9 --- /dev/null +++ b/netpyne/batchtools/docs/batchtools.rst @@ -0,0 +1,435 @@ +Running a Batch Job +=================== + +The NetPyNE batchtools subpackage provides a method of automating job submission and reporting + +A diagram of the object interfaces... + +:: + + batch<-->\ /---> configuration_0 >---\ + \ / specs---\ + \<--->dispatcher_0 sim_0 + \ \ comm ---/ + \ \---< results_0 <---/ + \ + \ /---> configuration_1 >---\ + \ / specs---\ + \<--->dispatcher_1 sim_1 + \ \ comm ---/ + \ \---< results_1 <---/ + \ + \ + ... 
+ +While objects and interfaces can be handled directly, batchtools offers simple wrapper commands applicable to most use-cases, where +automatic parameter searches can be done by specifying a search space and algorithm through `netpyne.batchtools.search`, and +parameter to model translation and result communication is handled through `netpyne.batchtools.specs` and `netpyne.batchtools.comm` respectively. + +A diagram of the wrapper interactions... + +:: + + netpyne.batchtools.search.search( ) ----------------------------\ host + | | + | search( ) | + ============================================================================================== + | comm.initialize( ) + | comm.send( ) + | cfg = netpyne.batchtools.specs.SimConfig( ) comm.close( ) + | | ^ ^ + v v | | + cfg.update_cfg() ----------------------------------------/ | + | + send( ) netpyne.batchtools.comm( ) + simulation + +1. Setting up batchtools +------------------------ +Beyond the necessary dependency installations for NetPyNE and NEURON, several additional `pip` installations are required. + +The NetPyNE installation should be handled as a development installation of the repository branch `batch`:: + + git clone https://github.com/Neurosim-lab/netpyne.git + cd netpyne + git checkout batch + pip install -e . + +The batchtools installation either:: + + pip install -u batchtk + +or a development install (recommended):: + + git clone https://github.com/jchen6727/batchtk.git + cd batchtk + pip install -e . + +Ray is a dependency for batchtools, and should be installed with the following command:: + + pip install -u ray[default] + +2. Examples +----------- +Examples of NetPyNE batchtools usage can be found in the ``examples`` directory `on the NetPyNE github `_. + +Examples of the underlying batchtk package can be in the ``examples`` directory `on the batchtk github `_. + +3. 
Retrieving batch configuration values through the ``specs`` object +--------------------------------------------------------------------- +Each simulation is able to retrieve relevant configurations through the ``specs`` object, and communicate with +the dispatcher through the ``comm`` object. + +importing the relevant objects:: + + from netpyne.batchtools import specs, comm + cfg = specs.SimConfig() # create a SimConfig object, can be provided with a dictionary on initial call to set initial values + netParams = specs.NetParams() # create a netParams object + +``netpyne.batchtools.specs`` behaves similarly to ``netpyne.sim.specs`` except in the following cases: + +* ``netpyne.batchtools.specs`` automatically captures relevant configuration mappings created by the ``dispatcher`` upon initialization + + * these mappings can be retrieved via ``specs.get_mappings()`` + +* the SimConfig object created by ``netpyne.batch.specs.SimConfig()`` will update itself with relevant configuration mappings through the ``update()`` method:: + + from netpyne.batchtools import specs # import the custom batch specs + cfg = specs.SimConfig() # create a SimConfig object + cfg.update() # update the cfg object with any relevant mappings for this particular batch job + +The ``update`` method will update the ``SimConfig`` object with the configuration mappings captured in ``specs`` (see: ``specs.get_mappings()``) + +This replaces the previous idiom for updating the SimConfig object with mappings from the batched job submission:: + + try: + from __main__ import cfg # import SimConfig object with params from parent module + except: + from cfg import cfg # if no simConfig in parent module, import directly from tut8_cfg module + + + + +4. Communicating results to the ``dispatcher`` with the ``comm`` object +----------------------------------------------------------------------- + +Prior batched simulations relied on ``.pkl`` files to communicate data. 
The ``netpyne.batch`` subpackage uses a specific ``comm`` object to send custom data back +The ``comm`` object determines the method of communication based on the batch job submission type. + +In terms of the simulation, the following functions are available to the user: + +* **comm.initialize()**: establishes a connection with the batch ``dispatcher`` for sending data + +* **comm.send()**: sends ``data`` to the batch ``dispatcher`` + + * for ``search`` jobs, it is important to match the data sent with the metric specified in the search function + +* **comm.close()**: closes and cleans up the connection with the batch ``dispatcher`` + +5. Specifying a batch job +------------------------- +Batch job handling is implemented with methods from ``netpyne.batchtools.search`` + +**search** + +.. code-block:: python + + def search(job_type: str, # the submission engine to run a single simulation (e.g. 'sge', 'sh') + comm_type: str, # the method of communication between host dispatcher and the simulation (e.g. 
'socket', 'filesystem') + run_config: Dict, # batch configuration, (keyword: string pairs to customize the submit template) + params: Dict, # search space (dictionary of parameter keys: tune search spaces) + algorithm: Optional[str] = "variant_generator", # search algorithm to use, see SEARCH_ALG_IMPORT for available options + label: Optional[str] = 'search', # label for the search + output_path: Optional[str] = '../batch', # directory for storing generated files + checkpoint_path: Optional[str] = '../ray', # directory for storing checkpoint files + max_concurrent: Optional[int] = 1, # number of concurrent trials to run at one time + batch: Optional[bool] = True, # whether concurrent trials should run synchronously or asynchronously + num_samples: Optional[int] = 1, # number of trials to run + metric: Optional[str] = "loss", # metric to optimize (this should match some key: value pair in the returned data) + mode: Optional[str] = "min", # either 'min' or 'max' (whether to minimize or maximize the metric) + algorithm_config: Optional[dict] = None, # additional configuration for the search algorithm + ) -> tune.ResultGrid: # results of the search + +The basic search implemented with the ``search`` function uses ``ray.tune`` as the search algorithm backend, returning a ``tune.ResultGrid`` which can be used to evaluate the search space and results. It takes the following parameters: + +* **job_type**: either "``sge``" or "``sh``", specifying how the job should be submitted, "``sge``" will submit batch jobs through the Sun Grid Engine. 
"``sh``" will submit bach jobs through the shell on a local machine +* **comm_type**: either "``socket``" or "``filesystem``", specifying how the job should communicate with the dispatcher +* **run_config**: a dictionary of keyword: string pairs to customize the submit template, the expected keyword: string pairs are dependent on the job_type:: + + ======= + sge + ======= + queue: the queue to submit the job to (#$ -q {queue}) + cores: the number of cores to request for the job (#$ -pe smp {cores}) + vmem: the amount of memory to request for the job (#$ -l h_vmem={vmem}) + realtime: the amount of time to request for the job (#$ -l h_rt={realtime}) + command: the command to run for the job + + example: + run_config = { + 'queue': 'cpu.q', # request job to be run on the 'cpu.q' queue + 'cores': 8, # request 8 cores for the job + 'vmem': '8G', # request 8GB of memory for the job + 'realtime': '24:00:00', # set timeout of the job to 24 hours + 'command': 'mpiexec -n $NSLOTS -hosts $(hostname) nrniv -python -mpi init.py' + } # set the command to be run to 'mpiexec -n $NSLOTS -hosts $(hostname) nrniv -python -mpi init.py' + + ======= + sh + ======= + command: the command to run for the job + + example: + run_config = { + 'command': 'mpiexec -n 8 nrniv -python -mpi init.py' + } # set the command to be run + +* **params**: a dictionary of config values to perform the search over. The keys of the dictionary should match the keys of the config object to be updated. Lists or numpy generators >2 values will force a grid search over the values; otherwise, a list of two values will create a uniform distribution sample space. + + **usage 1**: updating a constant value specified in the ``SimConfig`` object + +.. code-block:: python + + # take a config object with the following parameter ``foo`` + cfg = specs.SimConfig() + cfg.foo = 0 + cfg.update() + + # specify a search space for ``foo`` such that a simulation will run with: + # cfg.foo = 0 + # cfg.foo = 1 + # cfg.foo = 2 + # ... 
+ # cfg.foo = 9 + + # using: + params = { + 'foo': range(10) + } + + **usage 2**: updating a nested object in the ``SimConfig`` object + +.. code-block:: python + + # to update a nested object, the package uses the `.` operator to specify reflection into the object. + # take a config object with the following parameter object ``foo`` + cfg = specs.SimConfig() + cfg.foo = {'bar': 0, 'baz': 0} + cfg.update() + + # specify a search space for ``foo['bar']`` with `foo.bar` such that a simulation will run: + # cfg.foo['bar'] = 0 + # cfg.foo['bar'] = 1 + # cfg.foo['bar'] = 2 + # ... + # cfg.foo['bar'] = 9 + + # using: + params = { + 'foo.bar': range(10) + } + + # this reflection works with nested objects as well... + # i.e. + # cfg.foo = {'bar': {'baz': 0}} + # params = {'foo.bar.baz': range(10)} + + **usage 3**: updating a list object in the ``SimConfig`` object + +.. code-block:: python + + # to update a nested object, the package uses the `.` operator to specify reflection into the object. + # take a config object with the following + cfg = specs.SimConfig() + cfg.foo = [0, 1, 4, 9, 16] + cfg.update() + + # specify a search space for ``foo[0]`` with `foo.0` such that a simulation will run: + # cfg.foo[0] = 0 + # cfg.foo[0] = 1 + # cfg.foo[0] = 2 + # ... + # cfg.foo[0] = 9 + + # using: + params = { + 'foo.0': range(10) + } + + # this reflection works with nested objects as well... + +* **algorithm** : the search algorithm (supported within ``ray.tune``) + + **Supported algorithms** + +.. 
code-block:: python + + * "variant_generator": grid and random based search of the parameter space (see: https://docs.ray.io/en/latest/tune/api/suggestion.html) + * "random": grid and random based search of the parameter space (see: https://docs.ray.io/en/latest/tune/api/suggestion.html) + * "axe": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html) + * "bayesopt": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html) + * "hyperopt": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html) + * "bohb": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html) + * "nevergrad": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html) + * "optuna": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html) + * "hebo": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html) + * "sigopt": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html) + * "zoopt": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html) + +* **label**: a label for the search, used for output file naming + +* **output_path**: the directory for storing generated files, can be a relative or absolute path + +* **checkpoint_path**: the directory for storing checkpoint files in case the search needs to be restored, can be a relative or absolute path + +* **max_concurrent**: the number of concurrent trials to run at one time, it is recommended to keep in mind the resource usage of each trial to avoid overscheduling + +* **batch**: whether concurrent trials should run synchronously or asynchronously + +* **num_samples**: the number of trials to run, for any grid search, each value in the grid will be sampled ``num_samples`` times. 
+ +* **metric**: the metric to optimize (this should match some key: value pair in the returned data) + +* **mode**: either 'min' or 'max' (whether to minimize or maximize the metric) + +* **algorithm_config**: additional configuration for the search algorithm (see the `optuna docs `_) + +6. Batch searches on the Rosenbrock function (some simple examples) +------------------------------------------------------------------- +The ``examples`` directory `on the NetPyNE github `_ contains multiple methods of performing automatic parameter search of a +2 dimensional Rosenbrock function. These examples are used to quickly demonstrate some of the functionality of batch communications rather than the full process of running parameter searches on a detailed +NEURON simulation (see 7. Performing parameter optimization searches (CA3 example)) and therefore only contain a `batch.py` file containing the script detailing the parameter space and search method, and a +`rosenbrock.py` file containing the function to explore, and the appropriate declarations and calls for batch automation and communication (rather than the traditional `cfg.py`, `netParams.py`, and `init.py` files). + +1. `basic_rosenbrock `_ + +This demonstrates a basic grid search of the Rosenbrock function using the new ``batchtools``, where the search space is defined as the cartesian product of ``params['x0']`` and ``params['x1']`` + +.. code-block:: python + + # from batch.py + params = {'x0': [0, 3], + 'x1': [0, 3], + } + +that is, with the values ``cfg.x0``, ``cfg.x1`` iterating over: ``[(0, 0), (0, 3), (3, 0), (3, 3)]`` list + +2. `coupled_rosenbrock `_ + +This demonstrates a basic paired grid search, where ``x0`` is ``[0, 1, 2]`` and x1[n] is ``x0[n]**2`` + +.. 
code-block:: python + + # from batch.py + x0 = numpy.arange(0, 3) + x1 = x0**2 + + x0_x1 = [*zip(x0, x1)] + params = {'x0_x1': x0_x1 + } + +the ``x0`` and ``x1`` values are paired together to create a search space ``x0_x1`` iterating over: ``[(0, 0), (1, 1), (2, 4)]`` list + +then, in the ``rosenbrock.py`` file, a list of two values ``cfg.x0_x1`` is created to capture the ``x0_x1`` values, which is then unpacked into individual ``x0`` and ``x1`` values + +.. code-block:: python + + # from rosenbrock.py + cfg.x0_x1 = [1, 1] + + cfg.update_cfg() + + # -------------- unpacking x0_x1 list -------------- # + x0, x1 = cfg.x0_x1 + +then the Rosenbrock function is evaluated with the unpacked ``x0`` and ``x1`` + +3. `random_rosenbrock `_ + +This demonstrates a grid search over a nested object, where ``xn`` is a list of 2 values which are independently modified to search the cartesian product of ``[0, 1, 2, 3, 4]`` and ``[0, 1, 2, 3, 4]`` + +.. code-block:: python + + # from batch.py + params = {'xn.0': numpy.arange(0, 5), + 'xn.1': numpy.arange(0, 5) + } + +By using ``xn.0`` and ``xn.1`` we can reference the 0th and 1st elements of the list, which is created and modified in rosenbrock.py + +.. code-block:: python + + # from rosenbrock.py + cfg.xn = [1, 1] + + cfg.update_cfg() + + # ---------------- unpacking x list ---------------- # + x0, x1 = cfg.xn + + +7. Performing parameter optimization searches (CA3 example) +----------------------------------------------------------- +The ``examples`` directory `on the NetPyNE github `_ shows both a ``grid`` based search as well as an ``optuna`` based optimization. + +In the ``CA3`` example, we tune the ``PYR->BC`` ``NMDA`` and ``AMPA`` synaptic weights, as well as the ``BC->PYR`` ``GABA`` synaptic weight. Note the search space is defined + +.. 
code-block:: python + + # from optuna_search.py + params = {'nmda.PYR->BC' : [1e-3, 1.8e-3], + 'ampa.PYR->BC' : [0.2e-3, 0.5e-3], + 'gaba.BC->PYR' : [0.4e-3, 1.0e-3], + } + +in both ``optuna_search.py``, defining the upper and lower bounds of the search space, while in ``grid_search.py`` the search space is defined + +.. code-block:: python + + # from grid_search.py + params = {'nmda.PYR->BC' : numpy.linspace(1e-3, 1.8e-3, 3), + 'ampa.PYR->BC' : numpy.linspace(0.2e-3, 0.5e-3, 3), + 'gaba.BC->PYR' : numpy.linspace(0.4e-3, 1.0e-3, 3), + } + +which defines ``3x3x3`` specific values to search over + +Note that the ``metric`` specifies a specific ``string`` (``loss``) to report and optimize around. This value is generated and ``sent`` by the ``init.py`` simulation + +.. code-block:: python + + # from init.py + results['PYR_loss'] = (results['PYR'] - 3.33875)**2 + results['BC_loss'] = (results['BC'] - 19.725 )**2 + results['OLM_loss'] = (results['OLM'] - 3.470 )**2 + results['loss'] = (results['PYR_loss'] + results['BC_loss'] + results['OLM_loss']) / 3 + out_json = json.dumps({**inputs, **results}) + + print(out_json) + #TODO put all of this in a single function. + comm.send(out_json) + comm.close() + +The ``out_json`` output contains a dictionary which includes the ``loss`` metric (calculated as the MSE between observed and expected values) + +In a multi-objective optimization, the relevant ``PYR_loss``, ``BC_loss``, and ``OLM_loss`` components are additionally included (see ``mo_optuna_search.py``) + +8. Parameter Importance Evaluation Using fANOVA +----------------------------------------------- +A new feature in the batchtools beta release is the ability to evaluate parameter importance using a functional ANOVA inspired algorithm via the `Optuna` and `scikit-learn` libraries. 
+(See `the original Hutter paper `_ and its `citation `_) + +Currently, only unpaired single parameter importance to a single metric score is supported through the `NetPyNE.batchtools.analysis` `Analyzer` object, with an example of its usage +`here `_: + +In its current iteration, demonstrating the example requires generating an output `grid.csv` using `batch.py`, then loading that `grid.csv` into the `Analyzer` object. Then, using `run_analysis` will generate, per parameter, a single score indicative of the estimated `importance` of the parameter: that is, the estimated effect on the total variance of the model within the given bounds. + +.. code-block:: python + + # from analysis.py + from netpyne.batchtools.analysis import Analyzer + + analyzer = Analyzer(params = ['x.0', 'x.1', 'x.2', 'x.3'], metrics = ['fx']) # specify the parameter space and metrics of the batch function + analyzer.load_file('grid.csv') # load the grid file generated by the batch run + results = analyzer.run_analysis() # run fANOVA analysis and store the importance values in a results dictionary + diff --git a/netpyne/batchtools/evol.py b/netpyne/batchtools/evol.py new file mode 100644 index 000000000..a30d29982 --- /dev/null +++ b/netpyne/batchtools/evol.py @@ -0,0 +1,6 @@ +import numpy +from cmaes import CMA + +from netpyne.batchtools import specs, comm + +# ---- Rosenbrock Function & Constant Definition ---- # diff --git a/netpyne/batchtools/examples/CA3/README.md b/netpyne/batchtools/examples/CA3/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/netpyne/batchtools/examples/CA3/cfg.py b/netpyne/batchtools/examples/CA3/cfg.py new file mode 100644 index 000000000..8780792fc --- /dev/null +++ b/netpyne/batchtools/examples/CA3/cfg.py @@ -0,0 +1,48 @@ +from netpyne.batchtools import specs + +### config ### + +cfg = specs.SimConfig() + +cfg.duration = 1000 +cfg.dt = 0.1 +cfg.hparams = {'v_init': -65.0} +cfg.verbose = False +cfg.recordTraces = {} # don't save this 
+cfg.recordStim = False +cfg.recordStep = 0.1 # Step size in ms to save data (eg. V traces, LFP, etc) +cfg.filename = '00' # Set file output name +cfg.savePickle = False # Save params, network and sim output to pickle file +cfg.saveDat = False +cfg.saveJson = True +cfg.printRunTime = 0.1 +cfg.recordLFP = None # don't save this +cfg.simLabel = 'ca3' +cfg.saveFolder = '.' + + +cfg.analysis['plotRaster'] = {'saveFig': True} # raster ok +cfg.analysis['plotTraces'] = { } # don't save this +cfg.analysis['plotLFPTimeSeries'] = { } # don't save this + +cfg.cache_efficient = True # better with MPI? +""" remove all of the unecessary data """ +cfg.saveCellSecs = False +cfg.saveCellConns = False + +cfg.nmda={#NMDA search space + "PYR->BC" : 1.38e-3, + "PYR->OLM": 0.7e-3, + "PYR->PYR": 0.004e-3, +} +cfg.ampa={#AMPA search space + "PYR->BC" : 0.36e-3, + "PYR->OLM": 0.36e-3, + "PYR->PYR": 0.02e-3, +} + +cfg.gaba = {#GABA search space + "BC->BC" : 4.5e-3, + "BC->PYR" : 0.72e-3, + "OLM->PYR": 72e-3, +} diff --git a/netpyne/batchtools/examples/CA3/grid_search.py b/netpyne/batchtools/examples/CA3/grid_search.py new file mode 100644 index 000000000..3ad5dedd2 --- /dev/null +++ b/netpyne/batchtools/examples/CA3/grid_search.py @@ -0,0 +1,41 @@ +from netpyne.batchtools.search import search +import numpy + +params = {'nmda.PYR->BC' : numpy.linspace(1e-3, 1.8e-3, 3), + #'nmda.PYR->OLM': numpy.linspace(0.4e-3, 1.0e-3, 3), + #'nmda.PYR->PYR': numpy.linspace(0.001e-3, 0.007e-3, 3), + 'ampa.PYR->BC' : numpy.linspace(0.2e-3, 0.5e-3, 3), + #'ampa.PYR->OLM': numpy.linspace(0.2e-3, 0.5e-3, 3), + #'ampa.PYR->PYR': numpy.linspace(0.01e-3, 0.03e-3, 3), + #'gaba.BC->BC' : numpy.linspace(1e-3, 7e-3, 3), + 'gaba.BC->PYR' : numpy.linspace(0.4e-3, 1.0e-3, 3), + #'gaba.OLM->PYR': numpy.linspace(40e-3, 100e-3, 3), + } + +# use batch_shell_config if running directly on the machine +shell_config = {'command': 'mpiexec -np 4 nrniv -python -mpi init.py',} + +# use batch_sge_config if running on a +sge_config = 
{ + 'queue': 'cpu.q', + 'cores': 5, + 'vmem': '4G', + 'realtime': '00:30:00', + 'command': 'mpiexec -n $NSLOTS -hosts $(hostname) nrniv -python -mpi init.py'} + + +run_config = sge_config + +search(job_type = 'sge', # or 'sh' + comm_type = 'socket', + label = 'grid', + + params = params, + output_path = '../grid_batch', + checkpoint_path = '../ray', + run_config = run_config, + num_samples = 1, + metric = 'loss', + mode = 'min', + algorithm = "variant_generator", + max_concurrent = 9) diff --git a/netpyne/batchtools/examples/CA3/init.py b/netpyne/batchtools/examples/CA3/init.py new file mode 100644 index 000000000..b84a47b9a --- /dev/null +++ b/netpyne/batchtools/examples/CA3/init.py @@ -0,0 +1,27 @@ +from netpyne.batchtools import specs, comm +from netpyne import sim +from netParams import netParams, cfg +import json + +comm.initialize() + +sim.createSimulate(netParams=netParams, simConfig=cfg) +print('completed simulation...') +#comm.pc.barrier() +#sim.gatherData() +if comm.is_host(): + netParams.save("{}/{}_params.json".format(cfg.saveFolder, cfg.simLabel)) + print('transmitting data...') + inputs = specs.get_mappings() + #print(json.dumps({**inputs})) + results = sim.analysis.popAvgRates(show=False) + + results['PYR_loss'] = (results['PYR'] - 3.33875)**2 + results['BC_loss'] = (results['BC'] - 19.725 )**2 + results['OLM_loss'] = (results['OLM'] - 3.470 )**2 + results['loss'] = (results['PYR_loss'] + results['BC_loss'] + results['OLM_loss']) / 3 + out_json = json.dumps({**inputs, **results}) + + print(out_json) + comm.send(out_json) + comm.close() diff --git a/netpyne/batchtools/examples/CA3/mo_optuna_search.py b/netpyne/batchtools/examples/CA3/mo_optuna_search.py new file mode 100644 index 000000000..25f478051 --- /dev/null +++ b/netpyne/batchtools/examples/CA3/mo_optuna_search.py @@ -0,0 +1,50 @@ +from netpyne.batchtools.search import ray_optuna_search +from netpyne.batchtools import dispatchers, submits +import batchtk + +from ray import tune + +params = 
{'nmda.PYR->BC' : tune.uniform(1e-3, 1.8e-3), + #'nmda.PYR->OLM': tune.uniform(0.4e-3, 1.0e-3), + #'nmda.PYR->PYR': tune.uniform(0.001e-3, 0.007e-3), + 'ampa.PYR->BC' : tune.uniform(0.2e-3, 0.5e-3), + #'ampa.PYR->OLM': tune.uniform(0.2e-3, 0.5e-3), + #'ampa.PYR->PYR': tune.uniform(0.01e-3, 0.03e-3), + #'gaba.BC->BC' : tune.uniform(1e-3, 7e-3), + 'gaba.BC->PYR' : tune.uniform(0.4e-3, 1.0e-3), + #'gaba.OLM->PYR': tune.uniform(40e-3, 100e-3), + } + +# use batch_shell_config if running directly on the machine +shell_config = {'command': 'mpiexec -np 4 nrniv -python -mpi init.py',} + +# use batch_sge_config if running on a +sge_config = { + 'queue': 'cpu.q', + 'cores': 5, + 'vmem': '4G', + 'realtime': '00:30:00', + 'command': 'mpiexec -n $NSLOTS -hosts $(hostname) nrniv -python -mpi init.py'} + +run_config = sge_config + +Dispatcher = dispatchers.INETDispatcher +Submit = submits.SGESubmitSOCK +metrics = ['PYR_loss', 'BC_loss', 'OLM_loss', 'loss'] + +ray_study = ray_optuna_search( + dispatcher_constructor = Dispatcher, + submit_constructor=Submit, + params = params, + run_config = run_config, + max_concurrent = 3, + output_path = '../mo_batch', + checkpoint_path = '../ray', + label = 'mo_search', + num_samples = 15, + metric = metrics, + mode = ['min', 'min', 'min', 'loss'],) + +results = { + metric: ray_study.results.get_best_result(metric, 'min') for metric in metrics +} diff --git a/netpyne/batchtools/examples/CA3/mod/CA1ih.mod b/netpyne/batchtools/examples/CA3/mod/CA1ih.mod new file mode 100644 index 000000000..93d435e30 --- /dev/null +++ b/netpyne/batchtools/examples/CA3/mod/CA1ih.mod @@ -0,0 +1,64 @@ +: $Id: CA1ih.mod,v 1.4 2010/12/13 21:35:47 samn Exp $ +TITLE Ih CA3 + +UNITS { + (mA) = (milliamp) + (mV) = (millivolt) +} + +NEURON { + SUFFIX hcurrent + NONSPECIFIC_CURRENT ih + RANGE g, e, v50, htau, hinf + RANGE gfactor +} + +PARAMETER { + celsius (degC) + g= 0.0001 (mho/cm2) + e= -30 (mV) + v50=-82 (mV) + gfactor = 1 +} + +STATE { + h +} + +ASSIGNED { + ih 
(mA/cm2) + hinf + htau (ms) + v (mV) +} + +PROCEDURE iassign () { ih=g*h*(v-e)*gfactor } + +BREAKPOINT { + SOLVE states METHOD cnexp + iassign() +} + +DERIVATIVE states { + rates(v) + h'= (hinf- h)/ htau +} + +INITIAL { + rates(v) + h = hinf + iassign() +} + +PROCEDURE rates(v (mV)) { + UNITSOFF + : HCN1 + :hinf = 1/(1+exp(0.151*(v-v50))) + :htau = exp((0.033*(v+75)))/(0.011*(1+exp(0.083*(v+75)))) + + : HCN2 + hinf = 1/(1+exp((v-v50)/10.5)) + htau = (1/(exp(-14.59-0.086*v)+exp(-1.87+0.0701*v))) + UNITSON +} + diff --git a/netpyne/batchtools/examples/CA3/mod/CA1ika.mod b/netpyne/batchtools/examples/CA3/mod/CA1ika.mod new file mode 100644 index 000000000..9e4fe6922 --- /dev/null +++ b/netpyne/batchtools/examples/CA3/mod/CA1ika.mod @@ -0,0 +1,85 @@ +: $Id: CA1ika.mod,v 1.2 2010/12/01 05:06:07 samn Exp $ +TITLE Ika CA1 + +UNITS { + (mA) = (milliamp) + (mV) = (millivolt) +} + +NEURON { + SUFFIX kacurrent + NONSPECIFIC_CURRENT ika, ikad + RANGE g, gd, e, ninf, ntau, ndinf, ndtau, linf, ltau +} + +PARAMETER { + celsius (degC) + g= 0.048 (mho/cm2) + gd= 0 (mho/cm2) + e= -90 (mV) +} + +STATE { + n + nd : distal + l +} + +ASSIGNED { + v (mV) + ika (mA/cm2) + ikad (mA/cm2) + ninf + ntau (ms) + ndinf + ndtau (ms) + linf + ltau (ms) +} + +PROCEDURE iassign () { + ika=g*n*l*(v-e) + ikad=gd*nd*l*(v-e) +} + +BREAKPOINT { + SOLVE states METHOD cnexp + iassign() +} + +DERIVATIVE states { + rates(v) + n'= (ninf- n)/ ntau + l'= (linf- l)/ ltau + nd'= (ndinf-nd)/ndtau +} + +INITIAL { + rates(v) + n = ninf + l = linf + iassign() +} + +PROCEDURE rates(v (mV)) { + LOCAL a, b + UNITSOFF + a = exp(-0.038*(1.5+1/(1+exp(v+40)/5))*(v-11)) + b = exp(-0.038*(0.825+1/(1+exp(v+40)/5))*(v-11)) + ntau=4*b/(1+a) + if (ntau<0.1) {ntau=0.1} + ninf=1/(1+a) + + a=exp(-0.038*(1.8+1/(1+exp(v+40)/5))*(v+1)) + b=exp(-0.038*(0.7+1/(1+exp(v+40)/5))*(v+1)) + ndtau=2*b/(1+a) + if (ndtau<0.1) {ndtau=0.1} + ndinf=1/(1+a) + + a = exp(0.11*(v+56)) + ltau=0.26*(v+50) + if (ltau<2) {ltau=2} + linf=1/(1+a) + UNITSON +} 
+ diff --git a/netpyne/batchtools/examples/CA3/mod/CA1ikdr.mod b/netpyne/batchtools/examples/CA3/mod/CA1ikdr.mod new file mode 100644 index 000000000..4c5236362 --- /dev/null +++ b/netpyne/batchtools/examples/CA3/mod/CA1ikdr.mod @@ -0,0 +1,60 @@ +: $Id: CA1ikdr.mod,v 1.2 2010/12/01 05:10:52 samn Exp $ +TITLE IKDR CA1 + +UNITS { + (mA) = (milliamp) + (mV) = (millivolt) +} + +NEURON { + SUFFIX kdrcurrent + NONSPECIFIC_CURRENT ik + RANGE g, e, ninf, ntau +} + +PARAMETER { + celsius (degC) + g = 0.010 (mho/cm2) + e = -90 (mV) +} + +STATE { + n +} + +ASSIGNED { + v (mV) + ik (mA/cm2) + ninf + ntau (ms) +} + +PROCEDURE iassign () { ik=g*n*(v-e) } + +BREAKPOINT { + SOLVE states METHOD cnexp + iassign() +} + +DERIVATIVE states { + rates(v) + n'= (ninf- n)/ ntau +} + +INITIAL { + rates(v) + n = ninf + iassign() +} + +PROCEDURE rates(v (mV)) { + LOCAL a, b + UNITSOFF + a = exp(-0.11*(v-13)) + b = exp(-0.08*(v-13)) + ntau=50*b/(1+a) + if (ntau<2) {ntau=2} + ninf=1/(1+a) + UNITSON +} + diff --git a/netpyne/batchtools/examples/CA3/mod/CA1ina.mod b/netpyne/batchtools/examples/CA3/mod/CA1ina.mod new file mode 100644 index 000000000..d33ab9739 --- /dev/null +++ b/netpyne/batchtools/examples/CA3/mod/CA1ina.mod @@ -0,0 +1,89 @@ +: $Id: CA1ina.mod,v 1.4 2010/11/30 19:50:00 samn Exp $ +TITLE INa CA1 + +UNITS { + (mA) = (milliamp) + (mV) = (millivolt) +} + +NEURON { + SUFFIX nacurrent + NONSPECIFIC_CURRENT ina + RANGE g, e, vi, ki + RANGE minf,hinf,iinf,mtau,htau,itau : testing +} + +PARAMETER { + : v (mV) + celsius (degC) + g = 0.032 (mho/cm2) + e = 55 (mV) + vi = -60 (mV) + ki = 0.8 +} + +STATE { + m + h + I : i +} + +ASSIGNED { + i (mA/cm2) + ina (mA/cm2) + minf + mtau (ms) + hinf + htau (ms) + iinf + itau (ms) + v (mV) : testing +} + +: PROCEDURE iassign () { ina=g*m*m*m*h*i*(v-e) } +PROCEDURE iassign () { i=g*m*m*m*h*I*(v-e) ina=i} + +BREAKPOINT { + SOLVE states METHOD cnexp + iassign() +} + +DERIVATIVE states { + rates(v) + m' = (minf - m) / mtau + h' = (hinf - h) / htau + : i' = 
(iinf - i) / itau + I' = (iinf - I) / itau +} + +INITIAL { + rates(v) + h = hinf + m = minf + : i = iinf + I = iinf + iassign() : testing +} + + +PROCEDURE rates(v (mV)) { + LOCAL a, b + UNITSOFF + a = 0.4*(v+30)/(1-exp(-(v+30)/7.2)) + b = 0.124*(v+30)/(exp((v+30)/7.2)-1) + mtau=0.5/(a+b) + if (mtau<0.02) {mtau=0.02} + minf=a/(a+b) + a = 0.03*(v+45)/(1-exp(-(v+45)/1.5)) + b = 0.01*(v+45)/(exp((v+45)/1.5)-1) + htau=0.5/(a+b) + if (htau<0.5) {htau=0.5} + hinf=1/(1+exp((v+50)/4)) + a = exp(0.45*(v+66)) + b = exp(0.09*(v+66)) + itau=3000*b/(1+a) + if (itau<10) {itau=10} + iinf=(1+ki*exp((v-vi)/2))/(1+exp((v-vi)/2)) + UNITSON +} + diff --git a/netpyne/batchtools/examples/CA3/mod/MyExp2SynBB.mod b/netpyne/batchtools/examples/CA3/mod/MyExp2SynBB.mod new file mode 100644 index 000000000..9a68baef1 --- /dev/null +++ b/netpyne/batchtools/examples/CA3/mod/MyExp2SynBB.mod @@ -0,0 +1,67 @@ +: $Id: MyExp2SynBB.mod,v 1.4 2010/12/13 21:27:51 samn Exp $ +NEURON { +: THREADSAFE + POINT_PROCESS MyExp2SynBB + RANGE tau1, tau2, e, i, g, Vwt, gmax + NONSPECIFIC_CURRENT i +} + +UNITS { + (nA) = (nanoamp) + (mV) = (millivolt) + (uS) = (microsiemens) +} + +PARAMETER { + tau1=.1 (ms) <1e-9,1e9> + tau2 = 10 (ms) <1e-9,1e9> + e=0 (mV) + gmax = 1e9 (uS) + Vwt = 0 : weight for inputs coming in from vector +} + +ASSIGNED { + v (mV) + i (nA) + g (uS) + factor + etime (ms) +} + +STATE { + A (uS) + B (uS) +} + +INITIAL { + LOCAL tp + + Vwt = 0 : testing + + if (tau1/tau2 > .9999) { + tau1 = .9999*tau2 + } + A = 0 + B = 0 + tp = (tau1*tau2)/(tau2 - tau1) * log(tau2/tau1) + factor = -exp(-tp/tau1) + exp(-tp/tau2) + factor = 1/factor +} + +BREAKPOINT { + SOLVE state METHOD cnexp + g = B - A + if (g>gmax) {g=gmax}: saturation + i = g*(v - e) +} + +DERIVATIVE state { + A' = -A/tau1 + B' = -B/tau2 +} + +NET_RECEIVE(w (uS)) {LOCAL ww + ww=w + A = A + ww*factor + B = B + ww*factor +} diff --git a/netpyne/batchtools/examples/CA3/mod/MyExp2SynNMDABB.mod 
b/netpyne/batchtools/examples/CA3/mod/MyExp2SynNMDABB.mod new file mode 100644 index 000000000..01291643a --- /dev/null +++ b/netpyne/batchtools/examples/CA3/mod/MyExp2SynNMDABB.mod @@ -0,0 +1,108 @@ +: $Id: MyExp2SynNMDABB.mod,v 1.4 2010/12/13 21:28:02 samn Exp $ +NEURON { +: THREADSAFE + POINT_PROCESS MyExp2SynNMDABB + RANGE tau1, tau2, e, i, iNMDA, s, sNMDA, r, tau1NMDA, tau2NMDA, Vwt, smax, sNMDAmax + NONSPECIFIC_CURRENT i, iNMDA +} + +UNITS { + (nA) = (nanoamp) + (mV) = (millivolt) + (uS) = (microsiemens) +} + +PARAMETER { + tau1 = 0.1 (ms) <1e-9,1e9> + tau2 = 10 (ms) <1e-9,1e9> + tau1NMDA = 15 (ms) + tau2NMDA = 150 (ms) + e = 0 (mV) + mg = 1 + r = 1 + smax = 1e9 (1) + sNMDAmax = 1e9 (1) + + Vwt = 0 : weight for inputs coming in from vector +} + +ASSIGNED { + v (mV) + i (nA) + iNMDA (nA) + s (1) + sNMDA (1) + mgblock (1) + factor (1) + factor2 (1) + + etime (ms) +} + +STATE { + A (1) + B (1) + A2 (1) + B2 (1) +} + +INITIAL { + + LOCAL tp + + Vwt = 0 : testing + + if (tau1/tau2 > .9999) { + tau1 = .9999*tau2 + } + A = 0 + B = 0 + tp = (tau1*tau2)/(tau2 - tau1) * log(tau2/tau1) + factor = -exp(-tp/tau1) + exp(-tp/tau2) + factor = 1/factor + + if (tau1NMDA/tau2NMDA > .9999) { + tau1NMDA = .9999*tau2NMDA + } + A2 = 0 + B2 = 0 + tp = (tau1NMDA*tau2NMDA)/(tau2NMDA - tau1NMDA) * log(tau2NMDA/tau1NMDA) + factor2 = -exp(-tp/tau1NMDA) + exp(-tp/tau2NMDA) + factor2 = 1/factor2 +} + +BREAKPOINT { + SOLVE state METHOD cnexp + : Jahr Stevens 1990 J. 
Neurosci + mgblock = 1.0 / (1.0 + 0.28 * exp(-0.062(/mV) * v) ) + s = B - A + sNMDA = B2 - A2 + if (s >smax) {s =smax }: saturation + if (sNMDA>sNMDAmax) {sNMDA=sNMDAmax}: saturation + i = s * (v - e) + iNMDA = sNMDA * (v - e) * mgblock +} + +DERIVATIVE state { + A' = -A/tau1 + B' = -B/tau2 + A2' = -A2/tau1NMDA + B2' = -B2/tau2NMDA +} + +NET_RECEIVE(w (uS)) {LOCAL ww + ww=w + :printf("NMDA Spike: %g\n", t) + if(r>=0){ : if r>=0, g = AMPA + NMDA*r + A = A + factor *ww + B = B + factor *ww + A2 = A2 + factor2*ww*r + B2 = B2 + factor2*ww*r + }else{ + if(r>-1000){ : if r>-1, g = NMDA*r + A2 = A2 - factor2*ww*r + B2 = B2 - factor2*ww*r + } + : if r<0 and r<>-1, g = 0 + } +} diff --git a/netpyne/batchtools/examples/CA3/mod/aux_fun.inc b/netpyne/batchtools/examples/CA3/mod/aux_fun.inc new file mode 100644 index 000000000..ccb579afb --- /dev/null +++ b/netpyne/batchtools/examples/CA3/mod/aux_fun.inc @@ -0,0 +1,43 @@ +: $Id: aux_fun.inc,v 1.1 2009/11/04 01:24:52 samn Exp $ +COMMENT + +//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +// +// NOTICE OF COPYRIGHT AND OWNERSHIP OF SOFTWARE +// +// Copyright 2007, The University Of Pennsylvania +// School of Engineering & Applied Science. +// All rights reserved. +// For research use only; commercial use prohibited. +// Distribution without permission of Maciej T. Lazarewicz not permitted. 
+// mlazarew@seas.upenn.edu +// +//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +ENDCOMMENT + + + +:------------------------------------------------------------------- +FUNCTION fun1(v(mV),V0(mV),A(/ms),B(mV))(/ms) { + + fun1 = A*exp((v-V0)/B) +} + +FUNCTION fun2(v(mV),V0(mV),A(/ms),B(mV))(/ms) { + + fun2 = A/(exp((v-V0)/B)+1) +} + +FUNCTION fun3(v(mV),V0(mV),A(/ms),B(mV))(/ms) { + + if(fabs((v-V0)/B)<1e-6) { + :if(v==V0) { + fun3 = A*B/1(mV) * (1- 0.5 * (v-V0)/B) + } else { + fun3 = A/1(mV)*(v-V0)/(exp((v-V0)/B)-1) + } +} + +FUNCTION min(x,y) { if (x<=y){ min = x }else{ min = y } } +FUNCTION max(x,y) { if (x>=y){ max = x }else{ max = y } } diff --git a/netpyne/batchtools/examples/CA3/mod/caolmw.mod b/netpyne/batchtools/examples/CA3/mod/caolmw.mod new file mode 100644 index 000000000..3ea21a7ef --- /dev/null +++ b/netpyne/batchtools/examples/CA3/mod/caolmw.mod @@ -0,0 +1,47 @@ +: $Id: caolmw.mod,v 1.2 2010/11/30 16:40:09 samn Exp $ +COMMENT + +//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +// +// NOTICE OF COPYRIGHT AND OWNERSHIP OF SOFTWARE +// +// Copyright 2007, The University Of Pennsylvania +// School of Engineering & Applied Science. +// All rights reserved. +// For research use only; commercial use prohibited. +// Distribution without permission of Maciej T. Lazarewicz not permitted. 
+// mlazarew@seas.upenn.edu +// +//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +ENDCOMMENT + +UNITS { + (mollar) = (1/liter) + (M) = (mollar) + (mM) = (millimollar) + (mA) = (milliamp) + (mV) = (millivolt) + (mS) = (millisiemens) +} + +NEURON { + SUFFIX Caolmw + USEION ca READ ica, cai WRITE cai + RANGE alpha, tau +} + +PARAMETER { + alpha = 0.002 (cm2-M/mA-ms) + tau = 80 (ms) +} + +ASSIGNED { ica (mA/cm2) } + +INITIAL { cai = 0 } + +STATE { cai (mM) } + +BREAKPOINT { SOLVE states METHOD cnexp } + +DERIVATIVE states { cai' = -(1000) * alpha * ica - cai/tau } diff --git a/netpyne/batchtools/examples/CA3/mod/icaolmw.mod b/netpyne/batchtools/examples/CA3/mod/icaolmw.mod new file mode 100644 index 000000000..51112d099 --- /dev/null +++ b/netpyne/batchtools/examples/CA3/mod/icaolmw.mod @@ -0,0 +1,51 @@ +: $Id: icaolmw.mod,v 1.2 2010/11/30 16:44:13 samn Exp $ +COMMENT + +//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +// +// NOTICE OF COPYRIGHT AND OWNERSHIP OF SOFTWARE +// +// Copyright 2007, The University Of Pennsylvania +// School of Engineering & Applied Science. +// All rights reserved. +// For research use only; commercial use prohibited. +// Distribution without permission of Maciej T. Lazarewicz not permitted. 
+// mlazarew@seas.upenn.edu +// +//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +ENDCOMMENT + +UNITS { + (mA) = (milliamp) + (mV) = (millivolt) + (mS) = (millisiemens) +} + +NEURON { + SUFFIX ICaolmw + USEION ca WRITE ica + RANGE gca,eca +} + +PARAMETER { + gca = 1 (mS/cm2) + eca = 120 (mV) +} + +ASSIGNED { + ica (mA/cm2) + v (mV) +} + +PROCEDURE iassign () { ica = (1e-3) * gca * mcainf(v)^2 * (v-eca) } + +INITIAL { + iassign() +} + +BREAKPOINT { iassign() } + +FUNCTION mcainf(v(mV)) { mcainf = fun2(v, -20, 1, -9)*1(ms) } + +INCLUDE "aux_fun.inc" diff --git a/netpyne/batchtools/examples/CA3/mod/iholmw.mod b/netpyne/batchtools/examples/CA3/mod/iholmw.mod new file mode 100644 index 000000000..ccd919202 --- /dev/null +++ b/netpyne/batchtools/examples/CA3/mod/iholmw.mod @@ -0,0 +1,60 @@ +: $Id: iholmw.mod,v 1.2 2010/11/30 16:34:22 samn Exp $ +COMMENT + +//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +// +// NOTICE OF COPYRIGHT AND OWNERSHIP OF SOFTWARE +// +// Copyright 2007, The University Of Pennsylvania +// School of Engineering & Applied Science. +// All rights reserved. +// For research use only; commercial use prohibited. +// Distribution without permission of Maciej T. Lazarewicz not permitted. 
+// mlazarew@seas.upenn.edu +// +//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +ENDCOMMENT + +UNITS { + (mA) = (milliamp) + (mV) = (millivolt) + (mS) = (millisiemens) +} + +NEURON { + SUFFIX Iholmw + NONSPECIFIC_CURRENT i + RANGE gh,eh +} + +PARAMETER { + gh = 0.15 (mS/cm2) + eh = -40 (mV) +} + +ASSIGNED { + v (mV) + i (mA/cm2) +} + +STATE { q } + +PROCEDURE iassign () { i = (1e-3) * gh * q * (v-eh) } + +INITIAL { + q = qinf(v) + iassign() +} + +BREAKPOINT { + SOLVE states METHOD cnexp + iassign() +} + +DERIVATIVE states { q' = (qinf(v)-q)/qtau(v) } + +FUNCTION qinf(v(mV)) { qinf = fun2(v, -80, 1, 10)*1(ms) } +FUNCTION qtau(v(mV))(ms) { qtau = 200(ms)/(exp((v+70(mV))/20(mV))+exp(-(v+70(mV))/20(mV))) + 5(ms) } + +INCLUDE "aux_fun.inc" diff --git a/netpyne/batchtools/examples/CA3/mod/kcaolmw.mod b/netpyne/batchtools/examples/CA3/mod/kcaolmw.mod new file mode 100644 index 000000000..b2368787e --- /dev/null +++ b/netpyne/batchtools/examples/CA3/mod/kcaolmw.mod @@ -0,0 +1,52 @@ +: $Id: kcaolmw.mod,v 1.2 2010/11/30 16:47:18 samn Exp $ +COMMENT + +//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +// +// NOTICE OF COPYRIGHT AND OWNERSHIP OF SOFTWARE +// +// Copyright 2007, The University Of Pennsylvania +// School of Engineering & Applied Science. +// All rights reserved. +// For research use only; commercial use prohibited. +// Distribution without permission of Maciej T. Lazarewicz not permitted. 
+// mlazarew@seas.upenn.edu +// +//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +ENDCOMMENT + +UNITS { + (mA) = (milliamp) + (mV) = (millivolt) + (mS) = (millisiemens) + (mollar) = (1/liter) + (mM) = (millimollar) +} + +NEURON { + SUFFIX KCaolmw + USEION k WRITE ik + USEION ca READ cai + RANGE gkca,ek,kd +} + +PARAMETER { + gkca = 10 (mS/cm2) + ek = -90 (mV) + kd = 30 (mM) +} + +ASSIGNED { + cai (mM) + v (mV) + ik (mA/cm2) +} + +PROCEDURE iassign () { ik = (1e-3) * gkca * cai/(cai+kd) * (v-ek) } + +INITIAL { + iassign() +} + +BREAKPOINT { iassign() } diff --git a/netpyne/batchtools/examples/CA3/mod/kdrbwb.mod b/netpyne/batchtools/examples/CA3/mod/kdrbwb.mod new file mode 100644 index 000000000..fc52ae534 --- /dev/null +++ b/netpyne/batchtools/examples/CA3/mod/kdrbwb.mod @@ -0,0 +1,76 @@ +: $Id: kdrbwb.mod,v 1.4 2010/12/13 21:35:26 samn Exp $ +COMMENT + +//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +// +// NOTICE OF COPYRIGHT AND OWNERSHIP OF SOFTWARE +// +// Copyright 2007, The University Of Pennsylvania +// School of Engineering & Applied Science. +// All rights reserved. +// For research use only; commercial use prohibited. +// Distribution without permission of Maciej T. Lazarewicz not permitted. 
+// mlazarew@seas.upenn.edu +// +//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +ENDCOMMENT + +UNITS { + (mA) = (milliamp) + (mV) = (millivolt) + (mS) = (millisiemens) +} + +NEURON { + SUFFIX Kdrbwb + USEION k WRITE ik + RANGE phin,gkdr,ek + RANGE taon,ninf +} + +PARAMETER { + gkdr = 9 (mS/cm2) + ek = -90 (mV) + phin = 5 +} + +ASSIGNED { + v (mV) + ik (mA/cm2) + celsius (degC) + ninf (1) + taon (ms) +} + +STATE { n } + +PROCEDURE iassign () { ik = (1e-3) * gkdr * n^4 * (v-ek) } + +INITIAL { + rates(v) + n = ninf + iassign() +} + +BREAKPOINT { + SOLVE states METHOD cnexp + iassign() +} + +DERIVATIVE states { + rates(v) + n' = (ninf-n)/taon +} + +PROCEDURE rates(v(mV)) { LOCAL an, bn, q10 + q10 = phin:^((celsius-27.0(degC))/10.0(degC)) + + an = fun3(v, -34, -0.01, -10) + bn = fun1(v, -44, 0.125, -80) + + ninf = an/(an+bn) + taon = 1./((an+bn)*q10) +} + +INCLUDE "aux_fun.inc" diff --git a/netpyne/batchtools/examples/CA3/mod/nafbwb.mod b/netpyne/batchtools/examples/CA3/mod/nafbwb.mod new file mode 100644 index 000000000..37281dc94 --- /dev/null +++ b/netpyne/batchtools/examples/CA3/mod/nafbwb.mod @@ -0,0 +1,81 @@ +: $Id: nafbwb.mod,v 1.4 2010/12/13 21:35:08 samn Exp $ +COMMENT + +//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +// +// NOTICE OF COPYRIGHT AND OWNERSHIP OF SOFTWARE +// +// Copyright 2007, The University Of Pennsylvania +// School of Engineering & Applied Science. +// All rights reserved. +// For research use only; commercial use prohibited. +// Distribution without permission of Maciej T. Lazarewicz not permitted. 
+// mlazarew@seas.upenn.edu +// +//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +ENDCOMMENT + +UNITS { + (mA) = (milliamp) + (mV) = (millivolt) + (mS) = (millisiemens) +} + +NEURON { + SUFFIX Nafbwb + USEION na WRITE ina + RANGE phih + RANGE gna, ena, taoh : testing +} + +PARAMETER { + gna = 35 (mS/cm2) + ena = 55 (mV) + phih = 5 +} + +ASSIGNED { + v (mV) + ina (mA/cm2) + minf (1) + hinf (1) + taoh (ms) + celsius (degC) +} + +STATE { h } + +PROCEDURE iassign () { ina = (1e-3) * gna * minf^3 * h * (v-ena) } + +INITIAL { + rates(v) + h = hinf + iassign() +} + +BREAKPOINT { + SOLVE states METHOD cnexp + iassign() +} + +DERIVATIVE states { + rates(v) + h' = (hinf-h)/taoh +} + +PROCEDURE rates(v(mV)) { LOCAL am, bm, ah, bh, q10 + + q10 = phih:^((celsius-27.0(degC))/10.0(degC)) + + am = fun3(v, -35, -0.1, -10) + bm = fun1(v, -60, 4, -18) + minf = am/(am+bm) + + ah = fun1(v, -58, 0.07, -20) + bh = fun2(v, -28, 1, -10) + hinf = ah/(ah+bh) + taoh = 1./((ah+bh)*q10) +} + +INCLUDE "aux_fun.inc" diff --git a/netpyne/batchtools/examples/CA3/netParams.py b/netpyne/batchtools/examples/CA3/netParams.py new file mode 100644 index 000000000..156e9ff20 --- /dev/null +++ b/netpyne/batchtools/examples/CA3/netParams.py @@ -0,0 +1,321 @@ +from netpyne.batchtools import specs +from cfg import cfg + +cfg.update_cfg() +### params ### +# Network parameters +netParams = specs.NetParams() # object of class NetParams to store the network parameters +netParams.defaultThreshold = 0.0 +netParams.defineCellShapes = True # sets 3d geometry aligned along the y-axis + + +############################################################################### +## Cell types +############################################################################### +# Basket cell +BasketCell = {'secs':{}} +BasketCell['secs']['soma'] = {'geom': {}, 'mechs': {}} +BasketCell['secs']['soma']['geom'] = {'diam': 100, 'L': 31.831, 'nseg': 1, 'cm': 1} 
+BasketCell['secs']['soma']['mechs'] = {'pas': {'g': 0.1e-3, 'e': -65}, 'Nafbwb': {}, 'Kdrbwb': {}} +netParams.cellParams['BasketCell'] = BasketCell + + +# OLM cell +OlmCell = {'secs':{}} +OlmCell['secs']['soma'] = {'geom': {}, 'mechs': {}} +OlmCell['secs']['soma']['geom'] = {'diam': 100, 'L': 31.831, 'nseg': 1, 'cm': 1} +OlmCell['secs']['soma']['mechs'] = { + 'pas': {'g': 0.1e-3, 'e': -65}, + 'Nafbwb': {}, + 'Kdrbwb': {}, + 'Iholmw': {}, + 'Caolmw': {}, + 'ICaolmw': {}, + 'KCaolmw': {}} +netParams.cellParams['OlmCell'] = OlmCell + + +# Pyramidal cell +PyrCell = {'secs':{}} +PyrCell['secs']['soma'] = {'geom': {}, 'mechs': {}} +PyrCell['secs']['soma']['geom'] = {'diam': 20, 'L': 20, 'cm': 1, 'Ra': 150} +PyrCell['secs']['soma']['mechs'] = { + 'pas': {'g': 0.0000357, 'e': -70}, + 'nacurrent': {}, + 'kacurrent': {}, + 'kdrcurrent': {}, + 'hcurrent': {}} +PyrCell['secs']['Bdend'] = {'geom': {}, 'mechs': {}} +PyrCell['secs']['Bdend']['geom'] = {'diam': 2, 'L': 200, 'cm': 1, 'Ra': 150} +PyrCell['secs']['Bdend']['topol'] = {'parentSec': 'soma', 'parentX': 0, 'childX': 0} +PyrCell['secs']['Bdend']['mechs'] = { + 'pas': {'g': 0.0000357, 'e': -70}, + 'nacurrent': {'ki': 1}, + 'kacurrent': {}, + 'kdrcurrent': {}, + 'hcurrent': {}} +PyrCell['secs']['Adend1'] = {'geom': {}, 'mechs': {}} +PyrCell['secs']['Adend1']['geom'] = {'diam': 2, 'L': 150, 'cm': 1, 'Ra': 150} +PyrCell['secs']['Adend1']['topol'] = {'parentSec': 'soma', 'parentX': 1.0, 'childX': 0} # here there is a change: connected to end soma(1) instead of soma(0.5) +PyrCell['secs']['Adend1']['mechs'] = { + 'pas': {'g': 0.0000357, 'e': -70}, + 'nacurrent': {'ki': 0.5}, + 'kacurrent': {'g': 0.072}, + 'kdrcurrent': {}, + 'hcurrent': {'v50': -82, 'g': 0.0002}} +PyrCell['secs']['Adend2'] = {'geom': {}, 'mechs': {}} +PyrCell['secs']['Adend2']['geom'] = {'diam': 2, 'L': 150, 'cm': 1, 'Ra': 150} +PyrCell['secs']['Adend2']['topol'] = {'parentSec': 'Adend1', 'parentX': 1, 'childX': 0} +PyrCell['secs']['Adend2']['mechs'] = { + 
'pas': {'g': 0.0000357, 'e': -70}, + 'nacurrent': {'ki': 0.5}, + 'kacurrent': {'g': 0, 'gd': 0.120}, + 'kdrcurrent': {}, + 'hcurrent': {'v50': -90, 'g': 0.0004}} +PyrCell['secs']['Adend3'] = {'geom': {}, 'mechs': {}} +PyrCell['secs']['Adend3']['geom'] = {'diam': 2, 'L': 150, 'cm': 2, 'Ra': 150} +PyrCell['secs']['Adend3']['topol'] = {'parentSec': 'Adend2', 'parentX': 1, 'childX': 0} +PyrCell['secs']['Adend3']['mechs'] = { + 'pas': {'g': 0.0000714, 'e': -70}, + 'nacurrent': {'ki': 0.5}, + 'kacurrent': {'g': 0, 'gd': 0.200}, + 'kdrcurrent': {}, + 'hcurrent': {'v50': -90, 'g': 0.0007}} +netParams.cellParams['PyrCell'] = PyrCell + + +############################################################################### +## Synaptic mechs +############################################################################### + +netParams.synMechParams['AMPAf'] = {'mod': 'MyExp2SynBB', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} +netParams.synMechParams['NMDA'] = {'mod': 'MyExp2SynNMDABB', 'tau1': 0.05, 'tau2': 5.3, 'tau1NMDA': 15, 'tau2NMDA': 150, 'r': 1, 'e': 0} +netParams.synMechParams['GABAf'] = {'mod': 'MyExp2SynBB', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} +netParams.synMechParams['GABAs'] = {'mod': 'MyExp2SynBB', 'tau1': 0.2, 'tau2': 20, 'e': -80} +netParams.synMechParams['GABAss'] = {'mod': 'MyExp2SynBB', 'tau1': 20, 'tau2': 40, 'e': -80} + + +############################################################################### +## Populations +############################################################################### +netParams.popParams['PYR'] = {'cellType': 'PyrCell', 'numCells': 800} +netParams.popParams['BC'] = {'cellType': 'BasketCell', 'numCells': 200} +netParams.popParams['OLM'] = {'cellType': 'OlmCell', 'numCells': 200} + + +############################################################################### +# Current-clamp to cells +############################################################################### +netParams.stimSourceParams['IClamp_PYR'] = {'type': 'IClamp', 'del': 
2*0.1, 'dur': 1e9, 'amp': 50e-3} +netParams.stimSourceParams['IClamp_OLM'] = {'type': 'IClamp', 'del': 2*0.1, 'dur': 1e9, 'amp': -25e-3} + +netParams.stimTargetParams['IClamp_PYR->PYR'] = { + 'source': 'IClamp_PYR', + 'sec': 'soma', + 'loc': 0.5, + 'conds': {'pop': 'PYR'}} + +netParams.stimTargetParams['IClamp_OLM->OLM'] = { + 'source': 'IClamp_OLM', + 'sec': 'soma', + 'loc': 0.5, + 'conds': {'pop': 'OLM'}} + + +############################################################################### +# Setting connections +############################################################################### + +# PYR -> X, NMDA +netParams.connParams['PYR->BC_NMDA'] = {'preConds': {'pop': 'PYR'}, 'postConds': {'pop': 'BC'}, + 'convergence': 100, + 'weight': cfg.nmda['PYR->BC'], + 'delay': 2, + 'sec': 'soma', + 'loc': 0.5, + 'synMech': 'NMDA'} + +netParams.connParams['PYR->OLM_NMDA'] = {'preConds': {'pop': 'PYR'}, 'postConds': {'pop': 'OLM'}, + 'convergence': 10, + 'weight': cfg.nmda['PYR->OLM'], + 'delay': 2, + 'sec': 'soma', + 'loc': 0.5, + 'synMech': 'NMDA'} + +netParams.connParams['PYR->PYR_NMDA'] = {'preConds': {'pop': 'PYR'}, 'postConds': {'pop': 'PYR'}, + 'convergence': 25, + 'weight': cfg.nmda['PYR->PYR'], + 'delay': 2, + 'sec': 'Bdend', + 'loc': 1.0, + 'synMech': 'NMDA'} + +# PYR -> X, AMPA +netParams.connParams['PYR->BC_AMPA'] = {'preConds': {'pop': 'PYR'}, 'postConds': {'pop': 'BC'}, + 'convergence': 100, + 'weight': cfg.ampa['PYR->BC'], + 'delay': 2, + 'sec': 'soma', + 'loc': 0.5, + 'synMech': 'AMPAf'} + +netParams.connParams['PYR->OLM_AMPA'] = {'preConds': {'pop': 'PYR'}, 'postConds': {'pop': 'OLM'}, + 'convergence': 10, + 'weight': cfg.ampa['PYR->OLM'], + 'delay': 2, + 'sec': 'soma', + 'loc': 0.5, + 'synMech': 'AMPAf'} + +netParams.connParams['PYR->PYR_AMPA'] = {'preConds': {'pop': 'PYR'}, 'postConds': {'pop': 'PYR'}, + 'convergence': 25, + 'weight': cfg.ampa['PYR->PYR'], + 'delay': 2, + 'sec': 'Bdend', + 'loc': 1.0, + 'synMech': 'AMPAf'} + +# BC -> X, GABA 
+netParams.connParams['BC->BC_GABA'] = {'preConds': {'pop': 'BC'}, 'postConds': {'pop': 'BC'}, + 'convergence': 60, + 'weight': cfg.gaba['BC->BC'], + 'delay': 2, + 'sec': 'soma', + 'loc': 0.5, + 'synMech': 'GABAf'} + +netParams.connParams['BC->PYR_GABA'] = {'preConds': {'pop': 'BC'}, 'postConds': {'pop': 'PYR'}, + 'convergence': 50, + 'weight': cfg.gaba['BC->PYR'], + 'delay': 2, + 'sec': 'soma', + 'loc': 0.5, + 'synMech': 'GABAf'} + + +# OLM -> PYR, GABA +netParams.connParams['OLM->PYR_GABA'] = {'preConds': {'pop': 'OLM'}, 'postConds': {'pop': 'PYR'}, + 'convergence': 20, + 'weight': cfg.gaba['OLM->PYR'], + 'delay': 2, + 'sec': 'Adend2', + 'loc': 0.5, + 'synMech': 'GABAs'} + + +############################################################################### +# Setting NetStims +############################################################################### +# to PYR +netParams.stimSourceParams['NetStim_PYR_SOMA_AMPA'] = {'type': 'NetStim', 'interval': 1, 'number': 1000*cfg.duration, 'start': 0, 'noise': 1} +netParams.stimTargetParams['NetStim_PYR_SOMA_AMPA->PYR'] = { + 'source': 'NetStim_PYR_SOMA_AMPA', + 'conds': {'pop': 'PYR'}, + 'sec': 'soma', + 'loc': 0.5, + 'weight': 4*0.05e-3, # different from published value + 'delay': 2*0.1, + 'synMech': 'AMPAf'} + +netParams.stimSourceParams['NetStim_PYR_ADEND3_AMPA'] = {'type': 'NetStim', 'interval': 1, 'number': 1000*cfg.duration, 'start': 0, 'noise': 1} +netParams.stimTargetParams['NetStim_PYR_ADEND3_AMPA->PYR'] = { + 'source': 'NetStim_PYR_ADEND3_AMPA', + 'conds': {'pop': 'PYR'}, + 'sec': 'Adend3', + 'loc': 0.5, + 'weight': 4*0.05e-3, # different from published value + 'delay': 2*0.1, + 'synMech': 'AMPAf'} + +netParams.stimSourceParams['NetStim_PYR_SOMA_GABA'] = {'type': 'NetStim', 'interval': 1, 'number': 1000*cfg.duration, 'start': 0, 'noise': 1} +netParams.stimTargetParams['NetStim_PYR_SOMA_GABA->PYR'] = { + 'source': 'NetStim_PYR_SOMA_GABA', + 'conds': {'pop': 'PYR'}, + 'sec': 'soma', + 'loc': 0.5, + 'weight': 
0.012e-3, + 'delay': 2*0.1, + 'synMech': 'GABAf'} + +netParams.stimSourceParams['NetStim_PYR_ADEND3_GABA'] = {'type': 'NetStim', 'interval': 1, 'number': 1000*cfg.duration, 'start': 0, 'noise': 1} +netParams.stimTargetParams['NetStim_PYR_ADEND3_GABA->PYR'] = { + 'source': 'NetStim_PYR_ADEND3_GABA', + 'conds': {'pop': 'PYR'}, + 'sec': 'Adend3', + 'loc': 0.5, + 'weight': 0.012e-3, + 'delay': 2*0.1, + 'synMech': 'GABAf'} + +netParams.stimSourceParams['NetStim_PYR_ADEND3_NMDA'] = {'type': 'NetStim', 'interval': 100, 'number': int((1000/100.0)*cfg.duration), 'start': 0, 'noise': 1} +netParams.stimTargetParams['NetStim_PYR_ADEND3_NMDA->PYR'] = { + 'source': 'NetStim_PYR_ADEND3_NMDA', + 'conds': {'pop': 'PYR'}, + 'sec': 'Adend3', + 'loc': 0.5, + 'weight': 6.5e-3, + 'delay': 2*0.1, + 'synMech': 'NMDA'} + +# to BC +netParams.stimSourceParams['NetStim_BC_SOMA_AMPA'] = {'type': 'NetStim', 'interval': 1, 'number': 1000*cfg.duration, 'start': 0, 'noise': 1} +netParams.stimTargetParams['NetStim_BC_SOMA_AMPA->BC'] = { + 'source': 'NetStim_BC_SOMA_AMPA', + 'conds': {'pop': 'BC'}, + 'sec': 'soma', + 'loc': 0.5, + 'weight': 0.02e-3, + 'delay': 2*0.1, + 'synMech': 'AMPAf'} + +netParams.stimSourceParams['NetStim_BC_SOMA_GABA'] = {'type': 'NetStim', 'interval': 1, 'number': 1000*cfg.duration, 'start': 0, 'noise': 1} +netParams.stimTargetParams['NetStim_BC_SOMA_GABA->BC'] = { + 'source': 'NetStim_BC_SOMA_GABA', + 'conds': {'pop': 'BC'}, + 'sec': 'soma', + 'loc': 0.5, + 'weight': 0.2e-3, + 'delay': 2*0.1, + 'synMech': 'GABAf'} + +# to OLM +netParams.stimSourceParams['NetStim_OLM_SOMA_AMPA'] = {'type': 'NetStim', 'interval': 1, 'number': 1000*cfg.duration, 'start': 0, 'noise': 1} +netParams.stimTargetParams['NetStim_OLM_SOMA_AMPA->OLM'] = { + 'source': 'NetStim_OLM_SOMA_AMPA', + 'conds': {'pop': 'OLM'}, + 'sec': 'soma', + 'loc': 0.5, + 'weight': 0.0625e-3, + 'delay': 2*0.1, + 'synMech': 'AMPAf'} + +netParams.stimSourceParams['NetStim_OLM_SOMA_GABA'] = {'type': 'NetStim', 'interval': 1, 
'number': 1000*cfg.duration, 'start': 0, 'noise': 1} +netParams.stimTargetParams['NetStim_OLM_SOMA_GABA->OLM'] = { + 'source': 'NetStim_OLM_SOMA_GABA', + 'conds': {'pop': 'OLM'}, + 'sec': 'soma', + 'loc': 0.5, + 'weight': 0.2e-3, + 'delay': 2*0.1, + 'synMech': 'GABAf'} + +# Medial Septal inputs to BC and OLM cells +netParams.stimSourceParams['Septal'] = {'type': 'NetStim', 'interval': 150, 'number': int((1000/150)*cfg.duration), 'start': 0, 'noise': 0} +netParams.stimTargetParams['Septal->BC'] = { + 'source': 'Septal', + 'conds': {'pop': 'BC'}, + 'sec': 'soma', + 'loc': 0.5, + 'weight': 1.6e-3, + 'delay': 2*0.1, + 'synMech': 'GABAss'} + +netParams.stimTargetParams['Septal->OLM'] = { + 'source': 'Septal', + 'conds': {'pop': 'OLM'}, + 'sec': 'soma', + 'loc': 0.5, + 'weight': 1.6e-3, + 'delay': 2*0.1, + 'synMech': 'GABAss'} diff --git a/netpyne/batchtools/examples/CA3/optuna_search.py b/netpyne/batchtools/examples/CA3/optuna_search.py new file mode 100644 index 000000000..3ee09881d --- /dev/null +++ b/netpyne/batchtools/examples/CA3/optuna_search.py @@ -0,0 +1,39 @@ +from netpyne.batchtools.search import search + +params = {'nmda.PYR->BC' : [1e-3, 1.8e-3], + #'nmda.PYR->OLM': [0.4e-3, 1.0e-3], + #'nmda.PYR->PYR': [0.001e-3, 0.007e-3], + 'ampa.PYR->BC' : [0.2e-3, 0.5e-3], + #'ampa.PYR->OLM': [0.2e-3, 0.5e-3], + #'ampa.PYR->PYR': [0.01e-3, 0.03e-3], + #'gaba.BC->BC' : [1e-3, 7e-3], + 'gaba.BC->PYR' : [0.4e-3, 1.0e-3], + #'gaba.OLM->PYR': [40e-3, 100e-3], + } + +# use batch_shell_config if running directly on the machine +shell_config = {'command': 'mpiexec -np 4 nrniv -python -mpi init.py',} + +# use batch_sge_config if running on a +sge_config = { + 'queue': 'cpu.q', + 'cores': 5, + 'vmem': '4G', + 'realtime': '00:30:00', + 'command': 'mpiexec -n $NSLOTS -hosts $(hostname) nrniv -python -mpi init.py'} + + +run_config = sge_config + +search(job_type = 'sge', # or sh + comm_type = 'socket', + label = 'optuna', + params = params, + output_path = '../optuna_batch', + 
checkpoint_path = '../ray', + run_config = run_config, + num_samples = 27, + metric = 'loss', + mode = 'min', + algorithm = 'optuna', + max_concurrent = 3) diff --git a/netpyne/batchtools/examples/categorical_strings/batch.py b/netpyne/batchtools/examples/categorical_strings/batch.py new file mode 100644 index 000000000..d8cf9ef17 --- /dev/null +++ b/netpyne/batchtools/examples/categorical_strings/batch.py @@ -0,0 +1,18 @@ +from netpyne.batchtools.search import search + +params = { + 'param_str': [ 'string0', 'string1', 'string2' ] + } + +search(job_type = 'sh', + comm_type = 'socket', + label = 'categorical', + params = params, + output_path = '../grid_batch', + checkpoint_path = '../ray', + run_config = {'command': 'python categorical.py'}, + num_samples = 1, + metric = 'return', + mode = 'min', + algorithm = 'variant_generator', + max_concurrent = 3) diff --git a/netpyne/batchtools/examples/categorical_strings/categorical.py b/netpyne/batchtools/examples/categorical_strings/categorical.py new file mode 100644 index 000000000..622e90ca1 --- /dev/null +++ b/netpyne/batchtools/examples/categorical_strings/categorical.py @@ -0,0 +1,26 @@ +from netpyne.batchtools import specs, comm +import json + + +# ----------- cfg creation & batch update ----------- # + +cfg = specs.SimConfig() + +cfg.simLabel = 'categorical' +cfg.saveFolder = '.' 
+ +cfg.param_str = ['default'] + +cfg.update_cfg() + +# --------------------------------------------------- # + +# comm creation, calculation and result transmission # +comm.initialize() + +out_json = json.dumps({'return': 0, 'param_str': str(cfg.param_str), 'type': str(type(cfg.param_str))}) +if comm.is_host(): + print(out_json) + comm.send(out_json) + comm.close() + diff --git a/netpyne/batchtools/examples/cfg_loading/batch.py b/netpyne/batchtools/examples/cfg_loading/batch.py new file mode 100644 index 000000000..d8cf9ef17 --- /dev/null +++ b/netpyne/batchtools/examples/cfg_loading/batch.py @@ -0,0 +1,18 @@ +from netpyne.batchtools.search import search + +params = { + 'param_str': [ 'string0', 'string1', 'string2' ] + } + +search(job_type = 'sh', + comm_type = 'socket', + label = 'categorical', + params = params, + output_path = '../grid_batch', + checkpoint_path = '../ray', + run_config = {'command': 'python categorical.py'}, + num_samples = 1, + metric = 'return', + mode = 'min', + algorithm = 'variant_generator', + max_concurrent = 3) diff --git a/netpyne/batchtools/examples/cfg_loading/categorical.csv b/netpyne/batchtools/examples/cfg_loading/categorical.csv new file mode 100644 index 000000000..e2ad391a6 --- /dev/null +++ b/netpyne/batchtools/examples/cfg_loading/categorical.csv @@ -0,0 +1,13 @@ +,data,return,timestamp,checkpoint_dir_name,done,training_iteration,trial_id,date,time_this_iter_s,time_total_s,pid,hostname,node_ip,time_since_restore,iterations_since_restore,config/param_str,config/saveFolder,config/simLabel,logdir +0,"return 0 +param_str string0 +type +dtype: object",0,1724028563,,False,1,de3ff_00000,2024-08-18_19-49-23,1.7559211254119873,1.7559211254119873,87347,Jamess-MacBook-Pro.local,127.0.0.1,1.7559211254119873,1,string0,../grid_batch,categorical_00000,de3ff_00000 +1,"return 0 +param_str string1 +type +dtype: 
object",0,1724028563,,False,1,de3ff_00001,2024-08-18_19-49-23,1.754591941833496,1.754591941833496,87348,Jamess-MacBook-Pro.local,127.0.0.1,1.754591941833496,1,string1,../grid_batch,categorical_00001,de3ff_00001 +2,"return 0 +param_str string2 +type +dtype: object",0,1724028563,,False,1,de3ff_00002,2024-08-18_19-49-23,1.7577638626098633,1.7577638626098633,87349,Jamess-MacBook-Pro.local,127.0.0.1,1.7577638626098633,1,string2,../grid_batch,categorical_00002,de3ff_00002 diff --git a/netpyne/batchtools/examples/cfg_loading/categorical.py b/netpyne/batchtools/examples/cfg_loading/categorical.py new file mode 100644 index 000000000..3a8005872 --- /dev/null +++ b/netpyne/batchtools/examples/cfg_loading/categorical.py @@ -0,0 +1,13 @@ +from netpyne.batchtools import specs # import the custom batch specs +cfg = specs.SimConfig({'type': 0}) # create a SimConfig object, initializes it with a dictionary {'type': 0} such that +print("cfg.type={}".format(cfg.type)) # cfg.type == 0 +try: + cfg.update({'typo': 1}, force_match=True) # cfg.typo is not defined, so this line will raise an AttributeError +except AttributeError as e: + print(e) +cfg.update({'typo': 1}) # without force_match, the typo attribute cfg.fooo is created and set to 1 +print("cfg.type={}".format(cfg.type)) # cfg.type remains unchanged due to a typo in the attribute name 'type' -> 'typo' +print("cfg.typo={}".format(cfg.typo)) # instead, cfg.typo is created and set to the value 1 + +cfg.test_mappings({'type': 0}) # this will return True, as the mappings are valid +cfg.test_mappings({'missing': 1}) # this will raise an AttributeError, as the 'missing' attribute is not defined \ No newline at end of file diff --git a/netpyne/batchtools/examples/jupyter/batchtools.ipynb b/netpyne/batchtools/examples/jupyter/batchtools.ipynb new file mode 100644 index 000000000..22fe3489f --- /dev/null +++ b/netpyne/batchtools/examples/jupyter/batchtools.ipynb @@ -0,0 +1,314 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "source": 
[ + "Jupyter Tutorial: The NetPyNE batchtools subpackage\n", + "How to use the `specs` and `comm` to communicate with the `batchtools` `dispatcher`\n" + ], + "metadata": { + "collapsed": false + }, + "id": "89ec6ca2392a9a0d" + }, + { + "cell_type": "markdown", + "source": [ + "For each individual `sim`, communication with the `batchtools` `dispatcher` occurs through the `specs` and `comm` objects" + ], + "metadata": { + "collapsed": false + }, + "id": "be50f40d8e61a944" + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "from netpyne.batchtools import specs, comm" + ], + "metadata": { + "collapsed": false + }, + "id": "6f321aedb7faf945", + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "the `specs` object is an instantiation of a custom class extending the `batchtk` `Runner` ..." + ], + "metadata": { + "collapsed": false + }, + "id": "5f2f08f0b5e582c3" + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "help(type(specs))" + ], + "metadata": { + "collapsed": false + }, + "id": "29fa261236494bc3", + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "From this `specs` object, we can similarly call `specs.NetParams` and `specs.SimConfig` to create the NetPyNE objects..." 
+ ], + "metadata": { + "collapsed": false + }, + "id": "64ead24451bbad4a" + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "help(specs.NetParams)\n", + "help(specs.SimConfig)" + ], + "metadata": { + "collapsed": false + }, + "id": "43d263d080800019", + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "The `batchtools` job submission tool uses `environmental variables` to pass values to our `config` object created by `specs.SimConfig`, these `environmental variables` are captured during the `specs` `object creation` which occurs during the batchtools `import` (from the `batchtools` `__init__.py`:\n", + "```\n", + "from netpyne.batchtools.runners import NetpyneRunner\n", + "specs = NetpyneRunner()\n", + "```" + ], + "metadata": { + "collapsed": false + }, + "id": "710cc6084bd7af02" + }, + { + "cell_type": "markdown", + "source": [ + "Let's `export` some `environmental variables` to pass values to our `config` object. When this is handled by the `batchtools` `subpackage`, this occurs automatically..." 
+ ], + "metadata": { + "collapsed": false + }, + "id": "52704684f5e80f3c" + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "%env STRRUNTK0 =foo.bar=baz\n", + "%env FLOATRUNTK1 =float_val=7.7\n", + "from netpyne.batchtools import NetpyneRunner\n", + "specs = NetpyneRunner()" + ], + "metadata": { + "collapsed": false + }, + "id": "50de117ff7f43aa6", + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "One way of retrieving these values is by calling `specs.get_mappings()`" + ], + "metadata": { + "collapsed": false + }, + "id": "fac14e517044b980" + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "print(specs.get_mappings())" + ], + "metadata": { + "collapsed": false + }, + "id": "257fad390f4abce", + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Now, let's create our `config` object using the `specs.SimConfig()` constructor\n", + "This `config` object will hold a `dictionary` such that the initial values `foo['bar']` = `not_baz` and a `float_val` = `3.3`" + ], + "metadata": { + "collapsed": false + }, + "id": "92d41061bb828744" + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "cfg = specs.SimConfig()\n", + "cfg.foo = {'bar': 'not_baz', 'qux': 'quux'}\n", + "cfg.float_val = 3.3\n", + "print(\"cfg.foo['bar'] = {}\".format(cfg.foo['bar']))\n", + "print(\"cfg.float_val = {}\".format(cfg.float_val))" + ], + "metadata": { + "collapsed": false + }, + "id": "ca121d6ab30c3e7b", + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Finally, calling the `cfg.update_cfg()` method will overwrite the original values with our environment values, (`baz` and `7.7`)...\n", + "\n", + "in NetPyNE, this was originally handled with the:\n", + "```\n", + "try:\n", + " from __main__ import cfg\n", + "except:\n", + " from cfg import cfg\n", + "```\n", + "API idiom in the `netParams.py` file...\n", + " \n", + "as well as the \n", + "```\n", + "cfg, netParams = 
sim.readCmdLineArgs(simConfigDefault='src/cfg.py', netParamsDefault='src/netParams.py')\n", + "```\n", + "API idiom in the `init.py` file...\n", + "\n", + "using the `batchtools` subpackage, we can treat the `cfg` as an object and pass it between scripts via `import` statements...\n", + "in `netParams.py`...\n", + "```\n", + "from cfg import cfg\n", + "cfg.update()\n", + "```\n", + "in `init.py`...\n", + "```\n", + "from netParams import cfg, netParams\n", + "sim.createSimulateAnalyze(simConfig=cfg, netParams=netParams)\n", + "```" + ], + "metadata": { + "collapsed": false + }, + "id": "6ea43f729d0685d4" + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "print(\"prior to cfg.update()\")\n", + "print(\"cfg.foo['bar'] = {}\".format(cfg.foo['bar']))\n", + "print(\"cfg.float_val = {}\".format(cfg.float_val))\n", + "print()\n", + "cfg.update() # call update_cfg to update values in the cfg object with values assigned by batch\n", + "print(\"after the cfg.update()\")\n", + "print(\"cfg.foo['bar'] = {}\".format(cfg.foo['bar']))\n", + "print(\"cfg.float_val = {}\".format(cfg.float_val))" + ], + "metadata": { + "collapsed": false + }, + "id": "a9426b6e6594961", + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Finally, the `comm object` is used to report to the monitoring `dispatcher object`\n", + "the means of communication is dependent on which `dispatcher object` is instantiated, and communicated through environmental variables\n", + "in this case, since there is no `dispatcher object` the `comm` methods will simply perform `pass operations`" + ], + "metadata": { + "collapsed": false + }, + "id": "65bbb0ef2c76295a" + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "comm.initialize() # initializes comm object, establishing channel to communicate with the host dispatcher object" + ], + "metadata": { + "collapsed": false + }, + "id": "e9141d91d6e02aa3", + "execution_count": null + }, + { + "cell_type": "code", + 
"outputs": [], + "source": [ + "print(comm.is_host()) # returns a boolean IF the calling process is the 0th ranked parallelcontext, similar to sim.pc.rank == 0" + ], + "metadata": { + "collapsed": false + }, + "id": "5ed6a524bd8a3e0b", + "execution_count": null + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "comm.send('message') # sends 'message' to the `dispatcher object`" + ], + "metadata": { + "collapsed": false + }, + "id": "1966edbf32649352", + "execution_count": null + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "comm.close() #finalizes communication, closes any resources used to communicate with the `dispatcher object`" + ], + "metadata": { + "collapsed": false + }, + "id": "34f021af4127363c" + }, + { + "cell_type": "markdown", + "source": [], + "metadata": { + "collapsed": false + }, + "id": "648746fff96b8a72" + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/netpyne/batchtools/examples/rosenbrock/basic_rosenbrock/batch.py b/netpyne/batchtools/examples/rosenbrock/basic_rosenbrock/batch.py new file mode 100644 index 000000000..ed6a05474 --- /dev/null +++ b/netpyne/batchtools/examples/rosenbrock/basic_rosenbrock/batch.py @@ -0,0 +1,21 @@ +from netpyne.batchtools.search import search + +params = {'x0': [0, 3], + 'x1': [0, 3] + } + +# use shell_config if running directly on the machine +shell_config = {'command': 'python rosenbrock.py',} + +search(job_type = 'sh', # or sh + comm_type = 'socket', + label = 'optuna', + params = params, + output_path = '../optuna_batch', + checkpoint_path = '../ray', + run_config = {'command': 'python 
rosenbrock.py'}, + num_samples = 9, + metric = 'fx', + mode = 'min', + algorithm = 'optuna', + max_concurrent = 3) diff --git a/netpyne/batchtools/examples/rosenbrock/basic_rosenbrock/cma_batch.py b/netpyne/batchtools/examples/rosenbrock/basic_rosenbrock/cma_batch.py new file mode 100644 index 000000000..9bde349c6 --- /dev/null +++ b/netpyne/batchtools/examples/rosenbrock/basic_rosenbrock/cma_batch.py @@ -0,0 +1,31 @@ +from batchtk.runtk.trial import trial, LABEL_POINTER, PATH_POINTER +from netpyne.batchtools import dispatchers, submits +from cmaes import CMA +import numpy +import os +Dispatcher = dispatchers.INETDispatcher +Submit = submits.SHSubmitSOCK + +cwd = os.getcwd() + +def eval_rosenbrock(x0, x1, tid): + cfg = { + 'x0': x0, + 'x1': x1, + } + submit = Submit() + submit.update_templates(**{'command': 'python rosenbrock.py',}) + label = 'rosenbrock' + return float(trial(cfg, label, tid, Dispatcher, cwd, '../cma', submit)['fx']) + +#data = eval_rosenbrock(1, 1, "x11") + +optimizer = CMA(mean=numpy.zeros(2), sigma=1.0) +for generation in range(3): + solutions = [] + for cand in range(optimizer.population_size): + x = optimizer.ask() + value = eval_rosenbrock(x[0], x[1], "{}_{}".format(cand, generation)) + solutions.append((x, value)) + print(f"#{generation} {value} (x1={x[0]}, x2 = {x[1]})") + optimizer.tell(solutions) diff --git a/netpyne/batchtools/examples/rosenbrock/basic_rosenbrock/rosenbrock.py b/netpyne/batchtools/examples/rosenbrock/basic_rosenbrock/rosenbrock.py new file mode 100644 index 000000000..52fa35e86 --- /dev/null +++ b/netpyne/batchtools/examples/rosenbrock/basic_rosenbrock/rosenbrock.py @@ -0,0 +1,37 @@ +from netpyne.batchtools import specs, comm +import json + +# ---- Rosenbrock Function & Constant Definition ---- # + +""" +The rosenbrock minimum is at (A, A**2), where rosenbrock(A, A**2) = 0 +""" +def rosenbrock(x0, x1): + return 100 * (x1 - x0**2)**2 + (A - x0)**2 + +A = 1 +# --------------------------------------------------- # + +# 
----------- cfg creation & batch update ----------- # + +cfg = specs.SimConfig() + +cfg.simLabel = 'rosenbrock' +cfg.saveFolder = '.' + +cfg.x0 = 1 +cfg.x1 = 1 + +cfg.update_cfg() + +# --------------------------------------------------- # + +# comm creation, calculation and result transmission # +comm.initialize() + +out_json = json.dumps({'x0': cfg.x0, 'x1': cfg.x1, 'fx': rosenbrock(cfg.x0, cfg.x1)}) +if comm.is_host(): + print(out_json) + comm.send(out_json) + comm.close() + diff --git a/netpyne/batchtools/examples/rosenbrock/coupled_rosenbrock/batch.py b/netpyne/batchtools/examples/rosenbrock/coupled_rosenbrock/batch.py new file mode 100644 index 000000000..350c99084 --- /dev/null +++ b/netpyne/batchtools/examples/rosenbrock/coupled_rosenbrock/batch.py @@ -0,0 +1,24 @@ +from netpyne.batchtools.search import search +import numpy +x0 = numpy.arange(0, 3) +x1 = x0**2 + +x0_x1 = [*zip(x0.tolist(), x1.tolist())] +params = {'x0_x1': x0_x1 + } + +# use shell_config if running directly on the machine +shell_config = {'command': 'python rosenbrock.py',} + +search(job_type = 'sh', # or sh + comm_type = 'socket', + label = 'grid', + params = params, + output_path = '../grid_batch', + checkpoint_path = '../ray', + run_config = {'command': 'python rosenbrock.py'}, + num_samples = 1, + metric = 'fx', + mode = 'min', + algorithm = 'variant_generator', + max_concurrent = 3) \ No newline at end of file diff --git a/netpyne/batchtools/examples/rosenbrock/coupled_rosenbrock/rosenbrock.py b/netpyne/batchtools/examples/rosenbrock/coupled_rosenbrock/rosenbrock.py new file mode 100644 index 000000000..cc957b9d3 --- /dev/null +++ b/netpyne/batchtools/examples/rosenbrock/coupled_rosenbrock/rosenbrock.py @@ -0,0 +1,41 @@ +from netpyne.batchtools import specs, comm +import json + +# ---- Rosenbrock Function & Constant Definition ---- # + +""" +The rosenbrock minimum is at (A, A**2), where rosenbrock(A, A**2) = 0 +""" +def rosenbrock(x0, x1): + return 100 * (x1 - x0**2)**2 + (A - x0)**2 + 
+A = 1 +# --------------------------------------------------- # + +# ----------- cfg creation & batch update ----------- # + +cfg = specs.SimConfig() + +cfg.simLabel = 'rosenbrock' +cfg.saveFolder = '.' + +cfg.x0_x1 = [1, 1] + +cfg.update_cfg() + +# --------------------------------------------------- # + +# -------------- unpacking x0_x1 list -------------- # +x0, x1 = cfg.x0_x1 +# --------------------------------------------------- # + + +# comm creation, calculation and result transmission # +comm.initialize() + +out_json = json.dumps({'x0': x0, 'x1': x1, 'fx': rosenbrock(x0, x1)}) +if comm.is_host(): + print(out_json) + comm.send(out_json) + comm.close() + diff --git a/netpyne/batchtools/examples/rosenbrock/fanova_rosenbrock/analysis.py b/netpyne/batchtools/examples/rosenbrock/fanova_rosenbrock/analysis.py new file mode 100644 index 000000000..32914a419 --- /dev/null +++ b/netpyne/batchtools/examples/rosenbrock/fanova_rosenbrock/analysis.py @@ -0,0 +1,6 @@ +from netpyne.batchtools.analysis import Analyzer + +analyzer = Analyzer(params = ['x.0', 'x.1', 'x.2', 'x.3'], metrics = ['fx']) +analyzer.load_file('optuna.csv') +results = analyzer.run_analysis() + diff --git a/netpyne/batchtools/examples/rosenbrock/fanova_rosenbrock/batch.py b/netpyne/batchtools/examples/rosenbrock/fanova_rosenbrock/batch.py new file mode 100644 index 000000000..d6229949d --- /dev/null +++ b/netpyne/batchtools/examples/rosenbrock/fanova_rosenbrock/batch.py @@ -0,0 +1,24 @@ +from netpyne.batchtools.search import search +import numpy + +params = {'x.0': numpy.linspace(-1, 3, 5), + 'x.1': numpy.linspace(-1, 3, 5), + 'x.2': numpy.linspace(-1, 3, 5), + 'x.3': numpy.linspace(-1, 3, 5), + } + +# use shell_config if running directly on the machine +shell_config = {'command': 'python rosenbrock.py',} + +search(job_type = 'sh', # or sh + comm_type = 'socket', + label = 'grid', + params = params, + output_path = '../grid_batch', + checkpoint_path = '../ray', + run_config = {'command': 'python 
# --- Rosenbrock Functions & Constant Definitions --- #

A = 1  # location of the global minimum: rosenbrock(A, A**2, ...) == 0


def rosenbrock_v0(*args):
    """Coupled Rosenbrock: consecutive (x0, x1) pairs are independent terms.

    Parameters
    ----------
    *args : an even number of numeric values, read as (x0, x1) pairs.

    Returns
    -------
    Sum of 100 * (x0**2 - x1)**2 + (x0 - A)**2 over each pair.

    Raises
    ------
    ValueError
        If an odd number of arguments is supplied.
    """
    if len(args) % 2:
        raise ValueError('rosenbrock_v0 requires an even number of arguments')
    return sum(100 * (args[i]**2 - args[i+1])**2 + (args[i] - A)**2 for i in range(0, len(args), 2))


def rosenbrock_v1(*args):
    """Chained (banana-valley) Rosenbrock over consecutive coordinates.

    Bug fix: each term reads args[i + 1], so the sum must stop at
    len(args) - 1; the original iterated through len(args) and raised
    IndexError for every input.
    """
    return sum(100 * (args[i+1] - args[i]**2)**2 + (A - args[i])**2 for i in range(len(args) - 1))
+ +cfg.update_cfg() + +# --------------------------------------------------- # + +# comm creation, calculation and result transmission # +comm.initialize() + +out_json = json.dumps({'x': cfg.x, 'fx': rosenbrock_v0(*cfg.x)}) +if comm.is_host(): + print(out_json) + comm.send(out_json) + comm.close() + diff --git a/netpyne/batchtools/examples/rosenbrock/nested_rosenbrock/batch.py b/netpyne/batchtools/examples/rosenbrock/nested_rosenbrock/batch.py new file mode 100644 index 000000000..cf35c487d --- /dev/null +++ b/netpyne/batchtools/examples/rosenbrock/nested_rosenbrock/batch.py @@ -0,0 +1,23 @@ +from netpyne.batchtools.search import search +import numpy + + +params = {'xn.0': numpy.arange(0, 5), + 'xn.1': numpy.arange(0, 5) + } + +# use shell_config if running directly on the machine +shell_config = {'command': 'python rosenbrock.py',} + +search(job_type = 'sh', # or sh + comm_type = 'socket', + label = 'grid', + params = params, + output_path = '../grid_batch', + checkpoint_path = '../ray', + run_config = {'command': 'python rosenbrock.py'}, + num_samples = 1, + metric = 'fx', + mode = 'min', + algorithm = 'variant_generator', + max_concurrent = 3) \ No newline at end of file diff --git a/netpyne/batchtools/examples/rosenbrock/nested_rosenbrock/rosenbrock.py b/netpyne/batchtools/examples/rosenbrock/nested_rosenbrock/rosenbrock.py new file mode 100644 index 000000000..b1956b2db --- /dev/null +++ b/netpyne/batchtools/examples/rosenbrock/nested_rosenbrock/rosenbrock.py @@ -0,0 +1,41 @@ +from netpyne.batchtools import specs, comm +import json + +# ---- Rosenbrock Function & Constant Definition ---- # + +""" +The rosenbrock minimum is at (A, A**2), where rosenbrock(A, A**2) = 0 +""" +def rosenbrock(x0, x1): + return 100 * (x1 - x0**2)**2 + (A - x0)**2 + +A = 1 +# --------------------------------------------------- # + +# ----------- cfg creation & batch update ----------- # + +cfg = specs.SimConfig() + +cfg.simLabel = 'rosenbrock' +cfg.saveFolder = '.' 
def validate(element, container):
    """Check that *element* is a valid accessor for *container*.

    For lists, *element* must be coercible to an in-range integer index;
    for dictionaries and any other container it must be a member / key.

    Returns
    -------
    True when the accessor is valid.

    Raises
    ------
    AttributeError
        When the accessor is invalid for the container (the underlying
        failure is embedded in the message for debugging).
    """
    try:
        if isinstance(container, list):
            # list: element must coerce to an int index within bounds
            assert int(element) < len(container)
        else:
            # dict or any other container: membership / key test
            # (the original match statement had identical dict() and
            # wildcard arms; they are consolidated here)
            assert element in container
    except Exception as e:
        raise AttributeError("error when validating {} within container {}: {}".format(element, container, e))
    return True  # element is valid
crawler)): + try: + crawler = crawler.__getitem__(gi) + except TypeError: # case for lists. + crawler = crawler.__getitem__(int(gi)) + if not (force_match and not validate(assigns[-1], crawler)): + try: + crawler.__setitem__(assigns[-1], value) + except TypeError: + crawler.__setitem__(int(assigns[-1]), value) + return +""" + + +def traverse(obj, path, force_match=False): + if len(path) == 1: + if not (force_match and not validate(path[0], obj)): + return obj + if not (force_match and not validate(path[0], obj)): + try: + crawler = obj.__getitem__(path[0]) + except TypeError: # use for indexing into a list or in case the dictionary entry? is an int. + crawler = obj.__getitem__(int(path[0])) + return traverse(crawler, path[1:], force_match) + +def set_map(self, assign_path, value, force_match=False): + assigns = assign_path.split('.') + traverse(self, assigns, force_match)[assigns[-1]] = value + +def get_map(self, assign_path, force_match=False): + assigns = assign_path.split('.') + return traverse(self, assigns, force_match)[assigns[-1]] + +def update_items(d, u, force_match = False): + for k, v in u.items(): + try: + force_match and validate(k, d) + if isinstance(v, collections.abc.Container): + d[k] = update_items(d.get(k), v, force_match) + else: + d[k] = v + except Exception as e: + raise AttributeError("Error when calling update_items with force_match, item {} does not exist".format(k)) + return d + +class NetpyneRunner(Runner): + """ + runner for netpyne + see class runner + mappings <- + """ + def __new__(cls, inherit=None, **kwargs): + _super = get_class(inherit) + + def __init__(self, netParams=None, cfg=None, **kwargs): + """ + NetpyneRunner constructor + + Parameters + ---------- + self - NetpyneRunner instance + netParams - optional netParams instance (defaults to None, created with method: get_NetParams) + cfg - optional SimConfig instance (defaults to None, created with method: get_SimConfig) + N.B. requires cfg with the update_cfg method. 
see in get_SimConfig: + self.cfg = type("Runner_SimConfig", (specs.SimConfig,), + {'__mappings__': self.mappings, + 'update_cfg': update_cfg})() + kwargs - Unused + """ + _super.__init__(self, **kwargs) + self.netParams = netParams + self.cfg = cfg + + def _set_inheritance(self, inherit): + """ + Method for changing inheritance of NetpyneRunner + see runtk.RUNNERS + Parameters + ---------- + self + inherit + """ + if inherit in runtk.RUNNERS: + cls = type(self) + cls.__bases__ = (runtk.RUNNERS[inherit],) + else: + raise KeyError("inheritance {} not found in runtk.RUNNERS (please check runtk.RUNNERS for valid strings...".format(inherit)) + + + def get_NetParams(self, netParamsDict=None): + """ + Creates / Returns a NetParams instance + Parameters + ---------- + self + netParamsDict - optional dictionary to create NetParams instance (defaults to None) + - to be called during initial function call only + + Returns + ------- + NetParams instance + + """ + if self.netParams: + return self.netParams + else: + from netpyne import specs + self.netParams = specs.NetParams(netParamsDict) + return self.netParams + + def test_mappings(self, mappings): + """ + Tests mappings for validity + + Parameters + ---------- + mappings - dictionary of mappings to test + + Returns + ------- + bool - True if mappings are valid, False otherwise + """ + for assign_path, value in mappings.items(): + try: + set_map(self, assign_path, value, force_match=True) + print("successfully assigned: cfg.{} with value: {}".format(assign_path, value)) + except Exception as e: + raise Exception("failed on mapping: cfg.{} with value: {}\n{}".format(assign_path, value, e)) + return True + + + def update_cfg(self, simConfigDict=None, force_match=False): #intended to take `cfg` instance as self + """ + Updates the SimConfig instance with mappings to the runner, called from a SimConfig instance + + Parameters + ---------- + self - specs (NetpyneRunner) SimConfig instance + simConfigDict - optional dictionary to 
update SimConfig instance (defaults to None) + - to be called during initial function call only + + Returns + ------- + None (updates SimConfig instance in place) + """ + if simConfigDict: + update_items(self, simConfigDict, force_match) + for assign_path, value in self.__mappings__.items(): + try: + set_map(self, assign_path, value) + except Exception as e: + raise Exception("failed on mapping: cfg.{} with value: {}\n{}".format(assign_path, value, e)) + + def get_SimConfig(self, simConfigDict=None): + """ + Creates / Returns a SimConfig instance + Parameters + ---------- + self - NetpyneRunner instance + simConfigDict - optional dictionary to create NetParams instance (defaults to None) + - to be called during initial function call only + + Returns + ------- + SimConfig instance + """ + if self.cfg: + if simConfigDict: + update_items(self.cfg,simConfigDict, force_match=False) + return self.cfg + else: + from netpyne import specs + self.cfg = type("Runner_SimConfig", (specs.SimConfig,), + {'__mappings__': self.mappings, + 'update_cfg': update_cfg, + 'update': update_cfg, + 'test_mappings': test_mappings})(simConfigDict) + return self.cfg + + def set_SimConfig(self): + """ + updates the SimConfig instance with mappings to the runner, called from a Runner instance + + Parameters + ---------- + self + """ + # assumes values are only in 'cfg' + for assign_path, value in self.mappings.items(): + try: + set_map(self, "cfg.{}".format(assign_path), value) + except Exception as e: + raise Exception("failed on mapping: cfg.{} with value: {}\n{}".format(assign_path, value, e)) + + def set_mappings(self, filter=''): + # arbitrary filter, can work with 'cfg' or 'netParams' + for assign_path, value in self.mappings.items(): + if filter in assign_path: + set_map(self, assign_path, value) + + return type("NetpyneRunner{}".format(str(_super.__name__)), (_super,), + {'__init__': __init__, + '_set_inheritance': _set_inheritance, + 'get_NetParams': get_NetParams, + 'NetParams': 
get_NetParams, + 'SimConfig': get_SimConfig, + 'get_SimConfig': get_SimConfig, + 'set_SimConfig': set_SimConfig, + 'set_mappings': set_mappings, + 'test_mappings': test_mappings})(**kwargs) # need to override __init__ or else will call parent + +# use this test_list to check set_map .... +test_list = { + 'lists_of_dicts': [ + {'a': 0, 'b': 1, 'c': 2}, + {'d': 3, 'e': 4, 'f': 5}, + {'g': 6, 'h': 7, 'i': 8} + ], + 'dict_of_lists': { + 'a': [0, 1, 2], + 'b': [3, 4, 5], + 'c': [6, 7, 8] + }, + 'dict_of_dicts': { + 0: {'a': 0, 'b': 1, 'c': 2}, + 1: {'d': 3, 'e': 4, 'f': 5}, + 2: {'g': 6, 'h': 7, 'i': 8} + } +} + +""" +Test statements +In [3]: set_map(test_list, 'lists_of_dicts.0.a', 'a', force_match = True) +In [4]: set_map(test_list, 'lists_of_dicts.0.a', 0, force_match = True) +In [5]: set_map(test_list, 'lists_of_dicts.0.d', 0, force_match = True) +""" \ No newline at end of file diff --git a/netpyne/batchtools/search.py b/netpyne/batchtools/search.py new file mode 100644 index 000000000..1b791c1d4 --- /dev/null +++ b/netpyne/batchtools/search.py @@ -0,0 +1,435 @@ +import ray +import pandas +import os +from ray import tune, train +from ray.air import session, RunConfig +from ray.tune.search.basic_variant import BasicVariantGenerator +from ray.tune.search import create_searcher, ConcurrencyLimiter, SEARCH_ALG_IMPORT +from netpyne.batchtools import runtk +from collections import namedtuple +from batchtk.raytk.search import ray_trial, LABEL_POINTER +from batchtk.utils import get_path +import numpy +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + + + +choice = tune.choice +grid = tune.grid_search +uniform = tune.uniform + +def ray_optuna_search(dispatcher_constructor: Callable, # constructor for the dispatcher (e.g. INETDispatcher) + submit_constructor: Callable, # constructor for the submit (e.g. 
SHubmitSOCK) + run_config: Dict, # batch configuration, (keyword: string pairs to customize the submit template) + params: Dict, # search space (dictionary of parameter keys: tune search spaces) + label: Optional[str] = 'optuna_search', # label for the search + output_path: Optional[str] = '../batch', # directory for storing generated files + checkpoint_path: Optional[str] = '../ray', # directory for storing checkpoint files + max_concurrent: Optional[int] = 1, # number of concurrent trials to run at one time + batch: Optional[bool] = True, # whether concurrent trials should run synchronously or asynchronously + num_samples: Optional[int] = 1, # number of trials to run + metric: Optional[str|list|tuple] = "loss", # metric to optimize (this should match some key: value pair in the returned data + mode: Optional[str|list|tuple] = "min", # either 'min' or 'max' (whether to minimize or maximize the metric + optuna_config: Optional[dict] = None, # additional configuration for the optuna search algorithm + ) -> namedtuple('Study', ['algo', 'results']): + """ + ray_optuna_search(...) + + Parameters + ---------- + dispatcher_constructor:Callable, # constructor for the dispatcher (e.g. INETDispatcher) + submit_constructor:Callable, # constructor for the submit (e.g. 
SHubmitSOCK) + run_config:Dict, # batch configuration, (keyword: string pairs to customize the submit template) + params:Dict, # search space (dictionary of parameter keys: tune search spaces) + label:Optional[str] = 'optuna_search', # label for the search + output_path:Optional[str] = '../batch', # directory for storing generated files + checkpoint_path:Optional[str] = '../ray', # directory for storing checkpoint files + max_concurrent:Optional[int] = 1, # number of concurrent trials to run at one time + batch:Optional[bool] = True, # whether concurrent trials should run synchronously or asynchronously + num_samples:Optional[int] = 1, # number of trials to run + metric:Optional[str] = "loss", # metric to optimize (this should match some key: value pair in the returned data + mode:Optional[str] = "min", # either 'min' or 'max' (whether to minimize or maximize the metric + optuna_config:Optional[dict] = None, # additional configuration for the optuna search algorithm (incl. sampler, seed, etc.) + + Creates + ------- +