From c614e57d866393b2c0f6a6aa21c7aec7661bc28e Mon Sep 17 00:00:00 2001
From: Angel Ferran Pousa
Date: Thu, 3 Aug 2023 16:35:33 +0200
Subject: [PATCH 01/18] Remove outdated file
---
Analyze_optimization_results.ipynb | 98 ------------------------------
1 file changed, 98 deletions(-)
delete mode 100644 Analyze_optimization_results.ipynb
diff --git a/Analyze_optimization_results.ipynb b/Analyze_optimization_results.ipynb
deleted file mode 100644
index ddc3f964..00000000
--- a/Analyze_optimization_results.ipynb
+++ /dev/null
@@ -1,98 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Overview\n",
- "\n",
- "This notebook analyses and visualizes the results of a `optimas` optimization."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import numpy as np\n",
- "import matplotlib.pyplot as plt\n",
- "from optimas.post_processing import PostProcOptimization\n",
- "%matplotlib notebook"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# Modify the path below, to point to your optimization\n",
- "pp = PostProcOptimization('./optimization_folder/test_mf_disc/')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "pp.get_df() "
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "pp.plot_optimization()\n",
- "t0 = np.linspace(0, pp.get_df()['returned_time'].max(), 200 )\n",
- "pp.get_trace(plot=True, t_array=t0);"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "plt.figure()\n",
- "pp.plot_worker_timeline( fidelity_parameter='resolution' )"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "libe_env",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.9.15"
- },
- "vscode": {
- "interpreter": {
- "hash": "bfb1269c5d150bb39b3cc7af346375a5d6d04845d8cc3a7072c5122e27bb644e"
- }
- }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
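For reference, the workflow in the removed notebook amounts to the short script below, reproduced from its deleted cells. The folder path and the `fidelity_parameter` value are placeholders taken from the notebook itself, and since the file was dropped as outdated, the `optimas.post_processing` interface it used may not match the current package.

```python
# Script equivalent of the removed notebook, reproduced from its deleted cells.
import numpy as np
import matplotlib.pyplot as plt

from optimas.post_processing import PostProcOptimization

# Modify the path below to point to your optimization folder.
pp = PostProcOptimization('./optimization_folder/test_mf_disc/')

# Full history of evaluations as a pandas DataFrame.
print(pp.get_df())

# Plot the optimization history and a trace evaluated at the given times.
pp.plot_optimization()
t0 = np.linspace(0, pp.get_df()['returned_time'].max(), 200)
pp.get_trace(plot=True, t_array=t0)

# Worker timeline, colored by the 'resolution' fidelity parameter.
plt.figure()
pp.plot_worker_timeline(fidelity_parameter='resolution')
plt.show()
```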
From 81dc8bfc583064817f4ba1901768bc6de61c5a64 Mon Sep 17 00:00:00 2001
From: Angel Ferran Pousa
Date: Thu, 3 Aug 2023 16:38:02 +0200
Subject: [PATCH 02/18] Remove outdated submission scripts
---
submission_scripts/juwels | 26 --------------------------
submission_scripts/lawrencium | 19 -------------------
submission_scripts/lawrencium_1080ti | 19 -------------------
submission_scripts/perlmutter | 18 ------------------
submission_scripts/summit | 27 ---------------------------
5 files changed, 109 deletions(-)
delete mode 100644 submission_scripts/juwels
delete mode 100644 submission_scripts/lawrencium
delete mode 100644 submission_scripts/lawrencium_1080ti
delete mode 100644 submission_scripts/perlmutter
delete mode 100644 submission_scripts/summit
diff --git a/submission_scripts/juwels b/submission_scripts/juwels
deleted file mode 100644
index 9e8d9d06..00000000
--- a/submission_scripts/juwels
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash -x
-#SBATCH -J {name}
-#SBATCH --nodes={n_nodes}
-#SBATCH --ntasks-per-node=4
-#SBATCH --output=mpi-out.%j
-#SBATCH --error=mpi-err.%j
-#SBATCH --time={max_time}
-#SBATCH --partition=booster
-#SBATCH --account=plasmabbq
-#SBATCH --gres=gpu:4
-
-source activate optimization
-
-export NUMBA_NUM_THREADS=1
-export OMP_NUM_THREADS=1
-export FBPIC_ENABLE_GPUDIRECT=0
-export CUDA_VISIBLE_DEVICES=""
-
-# Script to run libEnsemble using multiprocessing on launch nodes.
-# Assumes Conda environment is set up.
-
-export LIBE_SIM_EXTRA_ARGS="--gres=gpu:1"
-
-python run_example.py --comms local --nworkers {n_workers}
-
-
diff --git a/submission_scripts/lawrencium b/submission_scripts/lawrencium
deleted file mode 100644
index 4a3379d9..00000000
--- a/submission_scripts/lawrencium
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-#SBATCH -J {name}
-#SBATCH --partition es1
-#SBATCH --qos es_normal
-#SBATCH --constraint es1_v100
-#SBATCH --time {max_time}
-#SBATCH --nodes {n_nodes}
-#SBATCH --gres=gpu:2 --cpus-per-task=4
-
-module load python/3.6
-source activate $SCRATCH/fbpic_env
-
-export NUMBA_NUM_THREADS=1
-export OMP_NUM_THREADS=1
-
-# Script to run libEnsemble using multiprocessing on launch nodes.
-# Assumes Conda environment is set up.
-
-python run_example.py --comms local --nworkers {n_workers}
diff --git a/submission_scripts/lawrencium_1080ti b/submission_scripts/lawrencium_1080ti
deleted file mode 100644
index 38e18ac0..00000000
--- a/submission_scripts/lawrencium_1080ti
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-#SBATCH -J {name}
-#SBATCH --partition es1
-#SBATCH --qos es_normal
-#SBATCH --constraint es1_1080ti
-#SBATCH --time {max_time}
-#SBATCH --nodes {n_nodes}
-#SBATCH --gres=gpu:4 --cpus-per-task=8
-
-module load python/3.6
-source activate $SCRATCH/fbpic_env
-
-export NUMBA_NUM_THREADS=1
-export OMP_NUM_THREADS=1
-
-# Script to run libEnsemble using multiprocessing on launch nodes.
-# Assumes Conda environment is set up.
-
-python run_example.py --comms local --nworkers {n_workers}
diff --git a/submission_scripts/perlmutter b/submission_scripts/perlmutter
deleted file mode 100644
index 6f631c4a..00000000
--- a/submission_scripts/perlmutter
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash -x
-#SBATCH -J {name}
-#SBATCH -A m3906_g
-#SBATCH -C gpu
-#SBATCH --time {max_time}
-#SBATCH --nodes {n_nodes}
-#SBATCH --ntasks-per-node=4
-#SBATCH --gpus-per-task=1
-
-module load python
-source activate optimas
-
-export MPICH_GPU_SUPPORT_ENABLED=0
-export FBPIC_ENABLE_GPUDIRECT=0
-
-export LIBE_SIM_EXTRA_ARGS="--exact -u -n 1 --gpus-per-task 1 -c 32 --mem-per-gpu=60G"
-
-python run_example.py --comms local --nworkers {n_workers}
diff --git a/submission_scripts/summit b/submission_scripts/summit
deleted file mode 100644
index 06dfe9bc..00000000
--- a/submission_scripts/summit
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash -x
-#BSUB -P APH114
-#BSUB -J {name}
-#BSUB -W {max_time}
-#BSUB -nnodes {n_nodes}
-#BSUB -alloc_flags "smt1"
-
-module purge
-module load gcc/4.8.5
-module load cuda/9.1.85
-module load spectrum-mpi/10.3.1.2-20200121
-module load fftw/3.3.8
-module load python/3.7.0-anaconda3-5.3.0
-source activate fbpic
-export CUPY_CACHE_DIR=/gpfs/alpine/scratch/rlehe/aph114/.cupy/kernel_cache
-
-export NUMBA_NUM_THREADS=1
-export OMP_NUM_THREADS=1
-export FBPIC_ENABLE_GPUDIRECT=0
-export FBPIC_DISABLE_CACHING=1
-
-# Script to run libEnsemble using multiprocessing on launch nodes.
-# Assumes Conda environment is set up.
-
-export LIBE_SIM_EXTRA_ARGS="-n 1 -a 1 -g 1 -c 1 --bind=packed:1"
-
-python run_example.py --comms local --nworkers {n_workers}
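The removed submission scripts relied on Python format-style placeholders (`{name}`, `{n_nodes}`, `{max_time}`, `{n_workers}`) that are filled in before submission. The sketch below only illustrates how such a template could be filled with `str.format`; the template text and the values are hypothetical examples, not part of the optimas API.

```python
# Hypothetical example of filling a submission-script template that uses the
# same placeholder fields as the removed scripts.
template = """#!/bin/bash
#SBATCH -J {name}
#SBATCH --nodes={n_nodes}
#SBATCH --time={max_time}

python run_example.py --comms local --nworkers {n_workers}
"""

# Fill in the placeholders with concrete values for one submission.
script = template.format(
    name='my_optimization',
    n_nodes=4,
    max_time='12:00:00',
    n_workers=16,
)

# Write the rendered script to disk, ready to be submitted.
with open('submit.sh', 'w') as f:
    f.write(script)
```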
From 1d50bd2885ad5e48f70a48f5edeac269016c91b4 Mon Sep 17 00:00:00 2001
From: Angel Ferran Pousa
Date: Fri, 18 Aug 2023 11:24:14 +0200
Subject: [PATCH 03/18] Update readme and add citation
---
README.md | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 64 insertions(+), 6 deletions(-)
diff --git a/README.md b/README.md
index e9c47cd2..b3b2577a 100644
--- a/README.md
+++ b/README.md
@@ -1,13 +1,71 @@
-
-
-
-
-
-# Optimization at scale, powered by [libEnsemble](https://libensemble.readthedocs.io/)
[](https://pypi.org/project/optimas/)
[](https://github.com/optimas-org/optimas/actions)
[](https://optimas.readthedocs.io/en/latest/?badge=latest)
[](https://zenodo.org/badge/latestdoi/287560975)
[](license.txt)
+
+
+
+
Optimas is a Python library for scalable optimization on massively-parallel supercomputers. See the [documentation](https://optimas.readthedocs.io/) for installation instructions, tutorials, and more information.
+
+## Installation
+Optimas can be directly installed form PyPI
+```sh
+pip install optimas
+```
+For detailed instructions about how to install optimas in HPC systems check the [guide](https://optimas.readthedocs.io/en/latest/user_guide/installation.html#instructions) in the documentation.
+
+## Citing optimas
+If your usage of `optimas` leads to a scientific publication, please consider citing the optimas paper:
+```bibtex
+@article{PhysRevAccelBeams.26.084601,
+ title = {Bayesian optimization of laser-plasma accelerators assisted by reduced physical models},
+ author = {Ferran Pousa, A. and Jalas, S. and Kirchen, M. and Martinez de la Ossa, A. and Th\'evenet, M. and Hudson, S. and Larson, J. and Huebl, A. and Vay, J.-L. and Lehe, R.},
+ journal = {Phys. Rev. Accel. Beams},
+ volume = {26},
+ issue = {8},
+ pages = {084601},
+ numpages = {9},
+ year = {2023},
+ month = {Aug},
+ publisher = {American Physical Society},
+ doi = {10.1103/PhysRevAccelBeams.26.084601},
+ url = {https://link.aps.org/doi/10.1103/PhysRevAccelBeams.26.084601}
+}
+```
+and libEnsemble:
+```bibtex
+@article{Hudson2022,
+ title = {{libEnsemble}: A Library to Coordinate the Concurrent
+ Evaluation of Dynamic Ensembles of Calculations},
+ author = {Stephen Hudson and Jeffrey Larson and John-Luke Navarro and Stefan M. Wild},
+ journal = {{IEEE} Transactions on Parallel and Distributed Systems},
+ volume = {33},
+ number = {4},
+ pages = {977--988},
+ year = {2022},
+ doi = {10.1109/tpds.2021.3082815}
+}
+```
From 28732e87dd3bffaf767f68fcdda68bd8b5d5c9ed Mon Sep 17 00:00:00 2001
From: Angel Ferran Pousa
Date: Fri, 18 Aug 2023 11:29:03 +0200
Subject: [PATCH 04/18] Update slack link
---
README.md | 2 +-
doc/source/conf.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index b3b2577a..d3cf2ec2 100644
--- a/README.md
+++ b/README.md
@@ -22,7 +22,7 @@
View Examples
·
-    <a href="https://optimas.slack.com/">Support</a>
+    <a href="https://optimas-group.slack.com/">Support</a>
·
API Reference
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 06fbf8cd..b3da5f51 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -80,7 +80,7 @@
},
{
"name": "Slack",
- "url": "https://optimas.slack.com/",
+ "url": "https://optimas-group.slack.com/",
"icon": "fa-brands fa-slack",
},
],
From 1d685e627d8ac8037b7fec586a0f93cba653e227 Mon Sep 17 00:00:00 2001
From: Angel Ferran Pousa
Date: Fri, 18 Aug 2023 13:18:18 +0200
Subject: [PATCH 05/18] Update local installation instructions
---
doc/source/user_guide/installation_local.rst | 24 --------------------
1 file changed, 24 deletions(-)
diff --git a/doc/source/user_guide/installation_local.rst b/doc/source/user_guide/installation_local.rst
index 191d875c..e9f738d3 100644
--- a/doc/source/user_guide/installation_local.rst
+++ b/doc/source/user_guide/installation_local.rst
@@ -3,30 +3,6 @@ Installation on a local computer
The recommended approach is to install optimas in a ``conda`` environment.
-Install basic dependencies
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. code::
-
- conda install numpy pandas
-
-Install PyTorch
-~~~~~~~~~~~~~~~
-
-If your computer does not feature a CUDA-capable GPU, install PyTorch for CPU:
-
-.. code::
-
- conda install pytorch cpuonly -c pytorch
-
-
-If you have a CUDA-capable GPU and want to take make it available to optimas,
-install PyTorch with:
-
-.. code::
-
- conda install pytorch pytorch-cuda=11.7 -c pytorch -c nvidia
-
Install ``mpi4py``
~~~~~~~~~~~~~~~~~~
If your system has already an MPI implementation installed, install ``mpi4py``
From 8f7d188844646a2f76ff3b3194037a2c9d8e93ef Mon Sep 17 00:00:00 2001
From: Angel Ferran Pousa
Date: Fri, 18 Aug 2023 13:20:27 +0200
Subject: [PATCH 06/18] Update dependencies
---
doc/source/user_guide/installation.rst | 25 ++-----------------------
1 file changed, 2 insertions(+), 23 deletions(-)
diff --git a/doc/source/user_guide/installation.rst b/doc/source/user_guide/installation.rst
index fb078224..d7eeddc5 100644
--- a/doc/source/user_guide/installation.rst
+++ b/doc/source/user_guide/installation.rst
@@ -1,30 +1,9 @@
-Installing optimas
-==================
-Optimas is supported on Python 3.8 and above. The package can be installed from
-PyPI using ``pip`` or directly from GitHub.
-
-
Dependencies
-------------
+============
+
Optimas relies on the following packages:
-* `NumPy `_ - Basic dependency for array operations.
-* `Pandas `_ - Data analysis library required for post-processing and other dependencies.
* `mpi4py `_ - Python bindings for MPI. Required for launching parallel simulations.
* `libEnsemble `_ - The backbone of optimas, orchestrates the concurrent evaluation of simulations, the resource detection and allocation, and the communication between simulations and manager.
* `jinja2 `_ - Needed to generate simulation scripts from templates.
-* `Pytorch `_ - Required by the Bayesian optimization generators.
* `Ax `_ - Algorithms for Bayesian optimization.
-
-Instructions
-------------
-Step-by-step instructions for installing optimas on a local computer and on
-different HPC clusters:
-
-.. toctree::
- :maxdepth: 1
-
- installation_local
- installation_maxwell
- installation_juwels
- installation_perlmutter
From 4b66365b5b43b7227f65e48e03e01ae9f0113e6c Mon Sep 17 00:00:00 2001
From: Angel Ferran Pousa
Date: Fri, 18 Aug 2023 13:21:01 +0200
Subject: [PATCH 07/18] Rename file
---
doc/source/user_guide/{installation.rst => dependencies.rst} | 0
1 file changed, 0 insertions(+), 0 deletions(-)
rename doc/source/user_guide/{installation.rst => dependencies.rst} (100%)
diff --git a/doc/source/user_guide/installation.rst b/doc/source/user_guide/dependencies.rst
similarity index 100%
rename from doc/source/user_guide/installation.rst
rename to doc/source/user_guide/dependencies.rst
From b1e4ff2a67b0cabfa70d536aa73b8d9ce6e0941e Mon Sep 17 00:00:00 2001
From: Angel Ferran Pousa
Date: Fri, 18 Aug 2023 13:22:36 +0200
Subject: [PATCH 08/18] Rearrange user guide
---
doc/source/user_guide/index.rst | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/doc/source/user_guide/index.rst b/doc/source/user_guide/index.rst
index 8d508e84..76fcedfe 100644
--- a/doc/source/user_guide/index.rst
+++ b/doc/source/user_guide/index.rst
@@ -4,10 +4,14 @@ User guide
==========
.. toctree::
- :maxdepth: 2
+ :maxdepth: 1
:caption: Installation
- installation
+ dependencies
+ installation_local
+ installation_maxwell
+ installation_juwels
+ installation_perlmutter
.. toctree::
:maxdepth: 2
From 61af0d098bb2f6e3ce7cec9cecdcf082289f785b Mon Sep 17 00:00:00 2001
From: Angel Ferran Pousa
Date: Fri, 18 Aug 2023 13:40:09 +0200
Subject: [PATCH 09/18] Update readme
---
README.md | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index d3cf2ec2..b4b358a1 100644
--- a/README.md
+++ b/README.md
@@ -31,11 +31,21 @@
Optimas is a Python library for scalable optimization on massively-parallel supercomputers. See the [documentation](https://optimas.readthedocs.io/) for installation instructions, tutorials, and more information.
## Installation
-Optimas can be directly installed form PyPI
+From PyPI
```sh
pip install optimas
```
-For detailed instructions about how to install optimas in HPC systems check the [guide](https://optimas.readthedocs.io/en/latest/user_guide/installation.html#instructions) in the documentation.
+From GitHub
+```sh
+pip install git+https://github.com/optimas-org/optimas.git
+```
+Make sure `mpi4py` is available in your environment prior to installing optimas (see [here](https://optimas.readthedocs.io/en/latest/user_guide/installation_local.html) for more details).
+
+Optimas is regularly used and tested in large distributed HPC systems.
+We have prepared installation instructions for
+[JUWELS (JSC)](https://optimas.readthedocs.io/en/latest/user_guide/installation_juwels.html),
+[Maxwell (DESY)](https://optimas.readthedocs.io/en/latest/user_guide/installation_maxwell.html) and
+[Perlmutter (NERSC)](https://optimas.readthedocs.io/en/latest/user_guide/installation_perlmutter.html).
## Citing optimas
If your usage of `optimas` leads to a scientific publication, please consider citing the optimas paper:
From 9f912f504d7e94f1ff7ee4a2bba1cf6200befaf8 Mon Sep 17 00:00:00 2001
From: Angel Ferran Pousa
Date: Fri, 18 Aug 2023 13:46:37 +0200
Subject: [PATCH 10/18] Add citation information
---
README.md | 2 +-
doc/source/user_guide/citation.rst | 38 ++++++++++++++++++++++++++++++
doc/source/user_guide/index.rst | 8 ++++++-
3 files changed, 46 insertions(+), 2 deletions(-)
create mode 100644 doc/source/user_guide/citation.rst
diff --git a/README.md b/README.md
index b4b358a1..93ab285c 100644
--- a/README.md
+++ b/README.md
@@ -48,7 +48,7 @@ We have prepared installation instructions for
[Perlmutter (NERSC)](https://optimas.readthedocs.io/en/latest/user_guide/installation_perlmutter.html).
## Citing optimas
-If your usage of `optimas` leads to a scientific publication, please consider citing the optimas paper:
+If your usage of `optimas` leads to a scientific publication, please consider citing the original [paper](https://link.aps.org/doi/10.1103/PhysRevAccelBeams.26.084601):
```bibtex
@article{PhysRevAccelBeams.26.084601,
title = {Bayesian optimization of laser-plasma accelerators assisted by reduced physical models},
diff --git a/doc/source/user_guide/citation.rst b/doc/source/user_guide/citation.rst
new file mode 100644
index 00000000..682ab91b
--- /dev/null
+++ b/doc/source/user_guide/citation.rst
@@ -0,0 +1,38 @@
+Citing optimas
+==============
+
+If your usage of ``optimas`` leads to a scientific publication, please consider
+citing the original `paper <https://link.aps.org/doi/10.1103/PhysRevAccelBeams.26.084601>`_:
+
+.. code-block:: bibtex
+
+ @article{PhysRevAccelBeams.26.084601,
+ title = {Bayesian optimization of laser-plasma accelerators assisted by reduced physical models},
+ author = {Ferran Pousa, A. and Jalas, S. and Kirchen, M. and Martinez de la Ossa, A. and Th\'evenet, M. and Hudson, S. and Larson, J. and Huebl, A. and Vay, J.-L. and Lehe, R.},
+ journal = {Phys. Rev. Accel. Beams},
+ volume = {26},
+ issue = {8},
+ pages = {084601},
+ numpages = {9},
+ year = {2023},
+ month = {Aug},
+ publisher = {American Physical Society},
+ doi = {10.1103/PhysRevAccelBeams.26.084601},
+ url = {https://link.aps.org/doi/10.1103/PhysRevAccelBeams.26.084601}
+ }
+
+and libEnsemble:
+
+.. code-block:: bibtex
+
+ @article{Hudson2022,
+ title = {{libEnsemble}: A Library to Coordinate the Concurrent
+ Evaluation of Dynamic Ensembles of Calculations},
+ author = {Stephen Hudson and Jeffrey Larson and John-Luke Navarro and Stefan M. Wild},
+ journal = {{IEEE} Transactions on Parallel and Distributed Systems},
+ volume = {33},
+ number = {4},
+ pages = {977--988},
+ year = {2022},
+ doi = {10.1109/tpds.2021.3082815}
+ }
\ No newline at end of file
diff --git a/doc/source/user_guide/index.rst b/doc/source/user_guide/index.rst
index 76fcedfe..1ebbc0d5 100644
--- a/doc/source/user_guide/index.rst
+++ b/doc/source/user_guide/index.rst
@@ -19,4 +19,10 @@ User guide
basic_usage/basic_setup
basic_usage/running_with_simulations
- basic_usage/analyze_output
\ No newline at end of file
+ basic_usage/analyze_output
+
+.. toctree::
+ :maxdepth: 1
+ :caption: Citation
+
+ citation
From fe5929c537aa01e16ba839996c580738c6bef5ca Mon Sep 17 00:00:00 2001
From: Angel Ferran Pousa
Date: Fri, 18 Aug 2023 14:03:00 +0200
Subject: [PATCH 11/18] Fix issue with latest Ax release
---
optimas/generators/ax/developer/multitask.py | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/optimas/generators/ax/developer/multitask.py b/optimas/generators/ax/developer/multitask.py
index c06e3a1c..18a7adb7 100644
--- a/optimas/generators/ax/developer/multitask.py
+++ b/optimas/generators/ax/developer/multitask.py
@@ -15,12 +15,17 @@
from ax.core.optimization_config import OptimizationConfig
from ax.core.objective import Objective as AxObjective
from ax.runners import SyntheticRunner
-from ax.modelbridge.factory import get_sobol, get_MTGP
+from ax.modelbridge.factory import get_sobol
from ax.modelbridge.torch import TorchModelBridge
from ax.core.observation import ObservationFeatures
from ax.core.generator_run import GeneratorRun
from ax.storage.json_store.save import save_experiment
from ax.storage.metric_registry import register_metric
+try:
+ from ax.modelbridge.factory import get_MTGP
+except ImportError:
+ # For Ax >= 0.3.4
+ from ax.modelbridge.factory import get_MTGP_LEGACY as get_MTGP
from optimas.generators.ax.base import AxGenerator
from optimas.core import (TrialParameter, VaryingParameter, Objective,
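The import guard added above can be reused on its own when writing code against both older and newer Ax releases; the snippet below is a minimal restatement of the fallback introduced by this patch.

```python
# Fallback import for the multi-task GP factory, as introduced in this patch.
# Newer Ax releases (>= 0.3.4, per the comment in the diff) no longer expose
# get_MTGP directly, so alias the legacy symbol back to the old name.
try:
    from ax.modelbridge.factory import get_MTGP
except ImportError:
    from ax.modelbridge.factory import get_MTGP_LEGACY as get_MTGP
```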
From 9f264e582623c836012bf15ab9ff7a07e465f9f5 Mon Sep 17 00:00:00 2001
From: Angel Ferran Pousa
Date: Fri, 18 Aug 2023 14:06:20 +0200
Subject: [PATCH 12/18] Make sure we test all supported python versions
---
.github/workflows/unix.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/unix.yml b/.github/workflows/unix.yml
index 226855fa..9098c214 100644
--- a/.github/workflows/unix.yml
+++ b/.github/workflows/unix.yml
@@ -7,7 +7,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: [3.8]
+ python-version: [3.8, 3.9, '3.10', 3.11]
steps:
- uses: actions/checkout@v2
From 5ddc3abe444d0a0133ab46e763f6d6f0ad292f53 Mon Sep 17 00:00:00 2001
From: Angel Ferran Pousa
Date: Fri, 18 Aug 2023 14:16:51 +0200
Subject: [PATCH 13/18] Run tests daily
---
.github/workflows/unix.yml | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/.github/workflows/unix.yml b/.github/workflows/unix.yml
index 9098c214..d1996164 100644
--- a/.github/workflows/unix.yml
+++ b/.github/workflows/unix.yml
@@ -1,6 +1,11 @@
name: Unix
-on: [push, pull_request]
+on:
+ push:
+ pull_request:
+ # Run daily at midnight.
+ schedule:
+ - cron: '0 0 * * *'
jobs:
test:
From d8b4cc85803135048b737067b1b92be20b7b2feb Mon Sep 17 00:00:00 2001
From: Angel Ferran Pousa
Date: Fri, 18 Aug 2023 14:21:55 +0200
Subject: [PATCH 14/18] Add comment
---
.github/workflows/unix.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/unix.yml b/.github/workflows/unix.yml
index d1996164..25d42601 100644
--- a/.github/workflows/unix.yml
+++ b/.github/workflows/unix.yml
@@ -3,7 +3,7 @@ name: Unix
on:
push:
pull_request:
- # Run daily at midnight.
+ # Run daily at midnight (UTC).
schedule:
- cron: '0 0 * * *'
From 4755adafd19379d3542c291cfab8cddddab78fc1 Mon Sep 17 00:00:00 2001
From: Ángel Ferran Pousa
Date: Fri, 18 Aug 2023 20:06:55 +0200
Subject: [PATCH 15/18] Increase Version number
---
optimas/__init__.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/optimas/__init__.py b/optimas/__init__.py
index 7fd229a3..fc79d63d 100644
--- a/optimas/__init__.py
+++ b/optimas/__init__.py
@@ -1 +1 @@
-__version__ = '0.2.0'
+__version__ = '0.2.1'
From 578d89c0744110e47e90c1bb1e6fec2141f62de5 Mon Sep 17 00:00:00 2001
From: Ángel Ferran Pousa
Date: Fri, 18 Aug 2023 20:16:50 +0200
Subject: [PATCH 16/18] Make grid single column in small screens
---
doc/source/index.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 7e3c6ee2..dd7b73b9 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -12,7 +12,7 @@ parallel optimization, from a typical laptop to exascale HPC systems. It is
built on top of
`libEnsemble <https://libensemble.readthedocs.io/>`_.
-.. grid:: 3
+.. grid:: 1 1 3 3
:gutter: 2
.. grid-item-card:: User guide
From cec54bdd28a5abb08314202e3fd364d859c497c0 Mon Sep 17 00:00:00 2001
From: Vadim Munteanu
Date: Wed, 23 Aug 2023 08:16:52 +0000
Subject: [PATCH 17/18] fixed double word typo
---
doc/source/user_guide/basic_usage/analyze_output.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/doc/source/user_guide/basic_usage/analyze_output.rst b/doc/source/user_guide/basic_usage/analyze_output.rst
index d2a9d331..26d2843c 100644
--- a/doc/source/user_guide/basic_usage/analyze_output.rst
+++ b/doc/source/user_guide/basic_usage/analyze_output.rst
@@ -57,7 +57,7 @@ generated. This is the case, for example, of the
``AxClient`` with the surrogate model used for Bayesian optimization.
Generators that have this capability can also save the internal model
-model to file with a certain periodicity (set by the ``model_save_period``
+to file with a certain periodicity (set by the ``model_save_period``
attribute). By default, these models will be saved in a
``exploration/model_history`` directory.
From 9143e0c5a3a81651859bc5c32e97169960738482 Mon Sep 17 00:00:00 2001
From: Jeffrey Larson
Date: Wed, 23 Aug 2023 08:14:40 -0500
Subject: [PATCH 18/18] Automated typo detection
---
.../user_guide/basic_usage/analyze_output.rst | 2 +-
.../custom_fld_diags.py | 20 ++++++++---------
.../custom_ptcl_diags.py | 22 +++++++++----------
.../template_simulation_script.py | 4 ++--
examples/wake_t_fbpic_mt/run_example.py | 2 +-
5 files changed, 25 insertions(+), 25 deletions(-)
diff --git a/doc/source/user_guide/basic_usage/analyze_output.rst b/doc/source/user_guide/basic_usage/analyze_output.rst
index d2a9d331..946523ff 100644
--- a/doc/source/user_guide/basic_usage/analyze_output.rst
+++ b/doc/source/user_guide/basic_usage/analyze_output.rst
@@ -14,7 +14,7 @@ In every run, the following log files are generated:
- ``libE-stats.txt``: log indicating the worker, start time, end time, etc. of
each evaluation.
-- ``ensemble.log``: log of ``libEnsemble`` containg the main events of
+- ``ensemble.log``: log of ``libEnsemble`` containing the main events of
the run. This includes the commands with each evaluation is launched.
- ``libE_history_for_run_starting__after_sim_.npy``:
numpy file that contains the
diff --git a/examples/multitask_lpa_fbpic_waket/custom_fld_diags.py b/examples/multitask_lpa_fbpic_waket/custom_fld_diags.py
index eb71731b..628d30af 100644
--- a/examples/multitask_lpa_fbpic_waket/custom_fld_diags.py
+++ b/examples/multitask_lpa_fbpic_waket/custom_fld_diags.py
@@ -326,10 +326,10 @@ def write_slices( self, field_array, iz_min, iz_max, snapshot, f2i ):
The indices between which the slices will be written
iz_min is inclusice and iz_max is exclusive
- snapshot: a LabSnaphot object
+ snapshot: a LabSnapshot object
f2i: dict
- Dictionary of correspondance between the field names
+ Dictionary of correspondence between the field names
and the integer index in the field_array
"""
# Open the file without parallel I/O in this implementation
@@ -457,10 +457,10 @@ def register_slice( self, inv_dz_lab ):
"""
# Find the index of the slice in the lab frame
if self.buffer_z_indices == []:
- # No previous index: caculate it from the absolute z_lab
+ # No previous index: calculate it from the absolute z_lab
iz_lab = int( (self.current_z_lab - self.zmin_lab)*inv_dz_lab )
else:
- # By construction, this index shoud be the previous index - 1
+ # By construction, this index should be the previous index - 1
# Handling integers avoids unstable roundoff errors, when
# self.current_z_lab is very close to zmin_lab + iz_lab*dz_lab
iz_lab = self.buffer_z_indices[-1] - 1
@@ -543,7 +543,7 @@ def __init__( self, gamma_boost, beta_boost, Nr_output ):
self.beta_boost = beta_boost
self.Nr_output = Nr_output
- # Create a dictionary that contains the correspondance
+ # Create a dictionary that contains the correspondence
# between the field names and array index
self.field_to_index = {'Er':0, 'Et':1, 'Ez':2, 'Br':3,
'Bt':4, 'Bz':5, 'Jr':6, 'Jt':7, 'Jz':8, 'rho':9}
@@ -561,7 +561,7 @@ def extract_slice( self, fld, comm, z_boost, zmin_boost, slice_array ):
The object from which to extract the fields
comm: a BoundaryCommunicator object
- Contains information about the gard cells in particular
+ Contains information about the guard cells in particular
z_boost: float (meters)
Position of the slice in the boosted frame
@@ -574,7 +574,7 @@ def extract_slice( self, fld, comm, z_boost, zmin_boost, slice_array ):
An array of reals that packs together the slices of the
different fields (always on array on the CPU).
The first index of this array corresponds to the field type
- (10 different field types), and the correspondance
+ (10 different field types), and the correspondence
between the field type and integer index is given field_to_index
The shape of this arrays is (10, 2*Nm-1, Nr_output)
"""
@@ -631,7 +631,7 @@ def extract_slice_cpu( self, fld, iz, Sz, slice_array ):
slice_array: np.ndarray
Array of shape (10, 2*Nm-1, Nr_output )
"""
- # Shortcut for the correspondance between field and integer index
+ # Shortcut for the correspondence between field and integer index
f2i = self.field_to_index
# Loop through the fields, and extract the proper slice for each field
@@ -708,7 +708,7 @@ def transform_fields_to_lab_frame( self, fields ):
gamma = self.gamma_boost
cbeta = c*self.beta_boost
beta_c = self.beta_boost/c
- # Shortcut to give the correspondance between field name
+ # Shortcut to give the correspondence between field name
# (e.g. 'Ex', 'rho') and integer index in the array
f2i = self.field_to_index
@@ -757,7 +757,7 @@ def extract_slice_cuda( Nr, iz, Sz, slice_arr,
Array of floats of shape (10, 2*Nm-1, Nr)
Er, Et, etc...: cupy.empty
- Array of complexs of shape (Nz, Nr), for the azimuthal mode m
+ Array of complexes of shape (Nz, Nr), for the azimuthal mode m
m: int
Index of the azimuthal mode involved
diff --git a/examples/multitask_lpa_fbpic_waket/custom_ptcl_diags.py b/examples/multitask_lpa_fbpic_waket/custom_ptcl_diags.py
index 84d352f4..6f6c8ff3 100644
--- a/examples/multitask_lpa_fbpic_waket/custom_ptcl_diags.py
+++ b/examples/multitask_lpa_fbpic_waket/custom_ptcl_diags.py
@@ -265,7 +265,7 @@ def write_slices( self, particle_dict, species_name, snapshot ):
species_name: String
A String that acts as the key for the buffered_slices dictionary
- snapshot: a LabSnaphot object
+ snapshot: a LabSnapshot object
"""
# Open the file without parallel I/O in this implementation
f = self.open_file(snapshot.filename)
@@ -465,7 +465,7 @@ def update_current_output_positions( self, t_boost, inv_gamma, inv_beta ):
Inverse of the Lorentz factor of the boost, and inverse
of the corresponding beta
"""
- # Some shorcuts for further calculation's purposes
+ # Some shortcuts for further calculation's purposes
t_lab = self.t_lab
t_boost_prev = t_boost - self.dt
@@ -585,7 +585,7 @@ def extract_slice( self, species, current_z_boost, previous_z_boost,
-------
slice_data_dict : dictionary of 1D float and integer arrays
A dictionary that contains the particle data of
- the simulation (with normalized weigths), including optional
+ the simulation (with normalized weights), including optional
integer arrays (e.g. "id", "charge")
"""
# Get a dictionary containing the particle data
@@ -644,7 +644,7 @@ def get_particle_data( self, species, current_z_boost,
-------
particle_data : A dictionary of 1D float and integer arrays
A dictionary that contains the particle data of
- the simulation (with normalized weigths), including optional
+ the simulation (with normalized weights), including optional
integer arrays (e.g. "id", "charge")
"""
# CPU
@@ -724,7 +724,7 @@ def get_particle_slice( self, particle_data, current_z_boost,
----------
particle_data : dictionary of 1D float and integer arrays
A dictionary that contains the particle data of
- the simulation (with normalized weigths), including optional
+ the simulation (with normalized weights), including optional
integer arrays (e.g. "id", "charge")
current_z_boost, previous_z_boost : float (m)
@@ -777,7 +777,7 @@ def interpolate_particles_to_lab_frame( self, slice_data_dict,
----------
slice_data_dict : dictionary of 1D float and integer arrays
A dictionary that contains the particle data of
- the simulation (with normalized weigths), including optional
+ the simulation (with normalized weights), including optional
integer arrays (e.g. "id", "charge")
current_z_boost : float (m)
@@ -790,7 +790,7 @@ def interpolate_particles_to_lab_frame( self, slice_data_dict,
------
slice_data_dict : dictionary of 1D float and integer arrays
A dictionary that contains the particle data of
- the simulation (with normalized weigths), including optional
+ the simulation (with normalized weights), including optional
integer arrays (e.g. "id", "charge")
"""
# Shortcuts for particle attributes
@@ -845,7 +845,7 @@ def apply_opmd_standard( self, slice_data_dict, species ):
----------
slice_data_dict : dictionary of 1D float and integer arrays
A dictionary that contains the particle data of
- the simulation (with normalized weigths), including optional
+ the simulation (with normalized weights), including optional
integer arrays (e.g. "id", "charge")
species : A ParticleObject
@@ -856,7 +856,7 @@ def apply_opmd_standard( self, slice_data_dict, species ):
-------
slice_data_dict : dictionary of 1D float and integer arrays
A dictionary that contains the particle data of
- the simulation (with normalized weigths), including optional
+ the simulation (with normalized weights), including optional
integer arrays (e.g. "id")
"""
# Normalize momenta
@@ -883,14 +883,14 @@ def apply_selection( self, select, slice_data_dict ) :
slice_data_dict : dictionary of 1D float and integer arrays
A dictionary that contains the particle data of
- the simulation (with normalized weigths), including optional
+ the simulation (with normalized weights), including optional
integer arrays (e.g. "id", "charge")
Returns
-------
slice_data_dict : dictionary of 1D float and integer arrays
A dictionary that contains the particle data of
- the simulation (with normalized weigths), including optional
+ the simulation (with normalized weights), including optional
integer arrays (e.g. "id", "charge")
"""
# Initialize an array filled with True
diff --git a/examples/multitask_lpa_fbpic_waket/template_simulation_script.py b/examples/multitask_lpa_fbpic_waket/template_simulation_script.py
index ac13a24d..34783ea7 100644
--- a/examples/multitask_lpa_fbpic_waket/template_simulation_script.py
+++ b/examples/multitask_lpa_fbpic_waket/template_simulation_script.py
@@ -68,13 +68,13 @@ def run_simulation():
w = np.abs(q / ct.e)
bunch = ParticleBunch(w, x, y, z, ux, uy, uz, name="bunch")
- # Distance between right bounday and laser centroid.
+ # Distance between right boundary and laser centroid.
dz_lb = 4.0 * ct.c * tau_laser
# Maximum radial extension of the plasma.
p_rmax = 2.5 * w0_laser
- # Box lenght.
+ # Box length.
l_box = dz_lb + 90e-6
# Number of diagnostics
diff --git a/examples/wake_t_fbpic_mt/run_example.py b/examples/wake_t_fbpic_mt/run_example.py
index 0c5cfa46..0c27bc20 100644
--- a/examples/wake_t_fbpic_mt/run_example.py
+++ b/examples/wake_t_fbpic_mt/run_example.py
@@ -1,6 +1,6 @@
"""
This example optimizes the focusing strength of an active plasma lens using
-Wake-T and FBPIC simulations by emplying a multitask generator.
+Wake-T and FBPIC simulations by employing a multitask generator.
The simulations are performed using the template defined in the
`template_simulation_script.py` file, which executes Wake-T or FBPIC