diff --git a/.github/workflows/qa.yml b/.github/workflows/qa.yml index fdf74797..a77c021f 100644 --- a/.github/workflows/qa.yml +++ b/.github/workflows/qa.yml @@ -24,14 +24,12 @@ jobs: name: Code QA runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - run: pip install black flake8 isort - - run: black --version - - run: isort --version - - run: flake8 --version - - run: isort --check . - - run: black --check . - - run: flake8 . + - run: sudo apt-get install -y pandoc # Needed by sphinx for notebooks + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: 3.x + - uses: pre-commit/action@v3.0.1 short-tests: name: Short tests @@ -44,4 +42,4 @@ jobs: - name: Install climetlab run: pip install -e . - run: pip install pytest - - run: pytest -vv -E short \ No newline at end of file + - run: pytest -vv -E short diff --git a/.github/workflows/test-and-release.yml b/.github/workflows/test-and-release.yml index 906bd879..aba44f22 100644 --- a/.github/workflows/test-and-release.yml +++ b/.github/workflows/test-and-release.yml @@ -35,34 +35,12 @@ jobs: name: Code QA runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - run: pip install black flake8 isort - - run: black --version - - run: isort --version - - run: flake8 --version - - run: isort --check . - - run: black --check . - - run: flake8 . - - # test_build_doc: - # name: Test building documentation - # runs-on: ubuntu-latest - # steps: - # - uses: actions/checkout@v3 - - # - name: Install packages needed to build the documentation - # run: | - # pip install sphinx - # pip install -r docs/requirements.txt - # sudo apt-get install pandoc - # pip freeze - - # - name: Documentation - # run: | - # make clean - # make html - # # make linkcheck (don't run yet) - # working-directory: docs + - run: sudo apt-get install -y pandoc # Needed by sphinx for notebooks + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: 3.x + - uses: pre-commit/action@v3.0.1 download-test-data: name: Download test data @@ -71,7 +49,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-python@v4 with: - python-version: ${{ matrix.python-version }} + python-version: 3.x - name: Tokens shell: python @@ -114,7 +92,7 @@ jobs: with: python-version: ${{ matrix.python-version }} - - name: Tokens + - name: Tokens shell: python env: ECMWFAPIRC: ${{ secrets.ECMWFAPIRC }} @@ -151,7 +129,7 @@ jobs: with: python-version: ${{ matrix.python-version }} - - name: Tokens + - name: Tokens shell: python env: ECMWFAPIRC: ${{ secrets.ECMWFAPIRC }} @@ -172,19 +150,16 @@ jobs: pip install pytest pip freeze - name: Tests notebooks - run: pytest tests/documentation/test_notebooks.py + run: pytest tests/documentation/test_notebooks.py short-tests: - # if: (github.event_name == 'release' && github.event.action == 'created') || github.ref == 'refs/heads/main' - # if: github.event_name == 'release' && github.event.action == 'created' - # if: github.ref == 'refs/heads/main' + needs: [download-test-data] strategy: matrix: platform: ["ubuntu-latest", "macos-latest", "windows-latest"] - python-version: ["3.8", "3.9", "3.10"] - # platform: ["ubuntu-latest"] - # python-version: ["3.10"] + python-version: ["3.9", "3.10"] + name: Short tests Python ${{ matrix.python-version }} ${{ matrix.platform }} runs-on: ${{ matrix.platform }} steps: @@ -199,7 +174,8 @@ jobs: name: test-data - run: tar xvf test-data.tar - run: cat test-data/dataA.txt - - name: Tokens + + - name: Tokens # (this should be removed when tests are 
refactored) shell: python env: @@ -213,8 +189,12 @@ jobs: if os.environ[n]: with open(m, "w") as f: print(os.environ[n], file=f) + + - run: pip install --upgrade pip + - name: Install climetlab run: pip install -e . + - run: climetlab versions - name: Install test tools run: | @@ -231,51 +211,29 @@ jobs: # notebooks need more dependencies # pytest --durations=0 -E release -k 'not test_notebooks' pytest --durations=10 -vv -E short - # - name: Install climetlab full - # run: | - # pip install .[interactive,tensorflow,zarr] - # pip freeze - # - name: Tests with dependencies - # run: | - # pytest --durations=10 -vv -E short - check-version-tag: - if: github.event_name == 'release' && github.event.action == 'created' - name: Check versions and tags - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: "3.8" - - name: Check that tag version matches package version - run: | - tag=${GITHUB_REF#refs/tags/} - version=$(python setup.py --version) - echo 'tag='$tag - echo "version file="$version - test "$tag" == "$version" deploy: name: Upload to Pypi and release - needs: [check-version-tag, short-tests, quality, tests-with-external-download, tests-notebooks] - # needs: [check-version-tag, short-tests, quality, test_build_doc, tests-with-external-download, tests-notebooks] + needs: [short-tests, quality, tests-with-external-download, tests-notebooks] runs-on: ubuntu-latest + steps: - - uses: actions/checkout@v3 - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: "3.9" - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install setuptools wheel twine - - name: Build pip package - run: python setup.py sdist - - name: Publish - env: - TWINE_USERNAME: __token__ - TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }} - run: twine upload --verbose dist/* + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.x + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install build wheel twine + - name: Build and publish + env: + TWINE_USERNAME: __token__ + TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }} + run: | + python -m build + twine upload dist/* diff --git a/.gitignore b/.gitignore index ea954df9..5788df3d 100644 --- a/.gitignore +++ b/.gitignore @@ -171,3 +171,4 @@ tempCodeRunnerFile.py *.dot *.zarr/ testdata/ +_version.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4bf40ccc..ead3899b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,22 +1,40 @@ -exclude: 'experiments' repos: -- repo: https://github.com/ambv/black - rev: 24.1.1 - hooks: - - id: black - language_version: python3.10 -- repo: https://github.com/pycqa/isort - rev: 5.13.0 - hooks: - - id: isort - name: isort (python) - - id: isort - name: isort (cython) - types: [cython] - - id: isort - name: isort (pyi) - types: [pyi] -- repo: https://github.com/pycqa/flake8 - rev: 7.0.0 - hooks: - - id: flake8 + + +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: check-yaml # Check YAML files for syntax errors only + args: [--unsafe, --allow-multiple-documents] + - id: debug-statements # Check for debugger imports and py37+ breakpoint() + - id: end-of-file-fixer # Ensure files end in a newline + - id: trailing-whitespace # Trailing whitespace checker + - id: no-commit-to-branch # Prevent committing to main / master + - id: 
check-added-large-files # Check for large files added to git + - id: check-merge-conflict # Check for files that contain merge conflict + +- repo: https://github.com/psf/black-pre-commit-mirror + rev: 24.1.1 + hooks: + - id: black + args: [--line-length=120] + +- repo: https://github.com/pycqa/isort + rev: 5.13.2 + hooks: + - id: isort + args: + - -l 120 + - --force-single-line-imports + - --profile black + + +- repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.3.0 + hooks: + - id: ruff + args: + - --line-length=120 + - --fix + - --exit-non-zero-on-fix + - --preview diff --git a/README.md b/README.md index 5ccdd5af..97a133b0 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,5 @@ The documentation can be found at . See https://climetlab.readthedocs.io/en/latest/guide/pluginlist.html. ### License -[Apache License 2.0](LICENSE) In applying this licence, ECMWF does not waive the privileges and immunities +[Apache License 2.0](LICENSE) In applying this licence, ECMWF does not waive the privileges and immunities granted to it by virtue of its status as an intergovernmental organisation nor does it submit to any jurisdiction. - - diff --git a/climetlab-and-ecmwflibs-install.md b/climetlab-and-ecmwflibs-install.md index b36d658e..068ecf0c 100644 --- a/climetlab-and-ecmwflibs-install.md +++ b/climetlab-and-ecmwflibs-install.md @@ -1,9 +1,9 @@ # How to run climetlab without ecmwflibs (using conda) -This workaround should address some of the issues related to ecmwflibs version not found or not installing correctly occuring during the installation of the climetlab package. +This workaround should address some of the issues related to ecmwflibs version not found or not installing correctly occuring during the installation of the climetlab package. This is how to install eccodes manually and use an environment variable to avoid installing ecmwflibs. -``` +``` # install eccodes manually conda install -c conda-forge eccodes pip install eccodes @@ -15,7 +15,7 @@ CLIMETLAB_DO_NOT_INSTALL_ECMWFLIBS=1 pip install climetlab # Limitations Obviously installing climetlab without one of its dependencies has some impact on its features. -And installing eccodes is not enough to provide them all : more packages are bundled in ecmwflibs (other than eccodes). +And installing eccodes is not enough to provide them all : more packages are bundled in ecmwflibs (other than eccodes). Depending on the required functinalities, there may be other missing packages to install manually. | Feature | default (ecmwflibs) | With workaround above | @@ -29,7 +29,7 @@ All CliMetLab features related to **reading (or writing) GRIB** data be supporte # Debugging ## Check eccodes installation -``` +``` import eccodes print(eccodes.__file__) ``` diff --git a/climetlab/version b/climetlab/version deleted file mode 100644 index 78cfa5eb..00000000 --- a/climetlab/version +++ /dev/null @@ -1 +0,0 @@ -0.21.6 diff --git a/climetlab/version.py b/climetlab/version.py deleted file mode 100644 index 27b010e6..00000000 --- a/climetlab/version.py +++ /dev/null @@ -1,22 +0,0 @@ -# (C) Copyright 2020 ECMWF. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. 
-# - -import os - - -def get_version(): - version_file = os.path.join(os.path.dirname(__file__), "version") - with open(version_file, "r") as f: - version = f.readlines() - version = version[0] - version = version.strip() - return version - - -__version__ = get_version() diff --git a/dev/dev.ipynb b/dev/dev.ipynb index 238d4da5..43410b19 100644 --- a/dev/dev.ipynb +++ b/dev/dev.ipynb @@ -94,4 +94,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} \ No newline at end of file +} diff --git a/dev/in-progress.ipynb b/dev/in-progress.ipynb index e2b40684..a8d2c30f 100644 --- a/dev/in-progress.ipynb +++ b/dev/in-progress.ipynb @@ -1162,4 +1162,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} \ No newline at end of file +} diff --git a/dev/tf.py b/dev/tf.py index c613e8fc..0f82f13d 100644 --- a/dev/tf.py +++ b/dev/tf.py @@ -1,17 +1,11 @@ -import keras.layers as layers -import numpy as np import tensorflow as tf -import tensorflow.keras as keras -from tensorflow.keras.callbacks import EarlyStopping, TensorBoard -from tensorflow.keras.layers import ( - Conv2D, - Dense, - Dropout, - Flatten, - Input, - Lambda, - Reshape, -) +from tensorflow.keras.callbacks import EarlyStopping +from tensorflow.keras.layers import Conv2D +from tensorflow.keras.layers import Dense +from tensorflow.keras.layers import Dropout +from tensorflow.keras.layers import Flatten +from tensorflow.keras.layers import Input +from tensorflow.keras.layers import Reshape from tensorflow.keras.models import Sequential import climetlab as cml @@ -46,9 +40,7 @@ def __init__(self, *args, **kwargs): def call(self, inputs): w, h = self.kernel_size inputs = tf.concat([inputs, inputs[:, :, :w, :]], axis=2) - inputs = tf.pad( - inputs, [[0, 0], [h // 2, h // 2], [0, 0], [0, 0]], constant_values=0 - ) + inputs = tf.pad(inputs, [[0, 0], [h // 2, h // 2], [0, 0], [0, 0]], constant_values=0) return super().call(inputs) diff --git a/docs/apply-fmt.sh b/docs/apply-fmt.sh index 762a8e38..cf5e9fb1 100755 --- a/docs/apply-fmt.sh +++ b/docs/apply-fmt.sh @@ -1,5 +1,5 @@ : for n in $(find . -name '*.rst') do - rstfmt $n + rstfmt $n done diff --git a/docs/contributing/datasets.rst b/docs/contributing/datasets.rst index f9afe759..df8ef588 100644 --- a/docs/contributing/datasets.rst +++ b/docs/contributing/datasets.rst @@ -21,11 +21,11 @@ And more examples can be found in the non-exhaustive :doc:`list of CliMetLab plugins <../guide/pluginlist>`. .. note:: - + **Naming convention** - A plugin package name (pip) should preferably start with ``climetlab-`` - and use dashes "-". + and use dashes "-". - The Python package to import should start with :py:class:`climetlab\_` and must use underscores "_". - A CliMetLab dataset defined by a plugin should start with @@ -56,7 +56,7 @@ Here is a `verbose output of running the plugin creation script `_, Notebook are automatically tested if the repository is on github. -Links on the README file are pointing to binder, colab, etc. to run the automatically created notebook. +Links on the README file are pointing to binder, colab, etc. to run the automatically created notebook. Manually creating the Python package ------------------------------------ @@ -132,8 +132,8 @@ The dataset name can be changed by changing the ``setup.py`` file. .. 
code-block:: python - - ["dataset-plugin-rain-observations=climetlab_dataset_plugin.rain_observations:RainObservations"] - + ["dataset-plugin-new-name =climetlab_dataset_plugin.rain_observations:RainObservations"] + - ["dataset-plugin-rain-observations=climetlab_dataset_plugin.rain_observations:RainObservations"] + + ["dataset-plugin-new-name =climetlab_dataset_plugin.rain_observations:RainObservations"] A good practice is to change keep the class name in sync with the dataset name. @@ -145,8 +145,8 @@ New datasets can be added to the plugin, as long as the corresponding class is c .. code-block:: python - - ["dataset-plugin-rain-observations=climetlab_dataset_plugin.rain_observations:RainObservations"] - + ["dataset-plugin-rain-observations=climetlab_dataset_plugin.rain_observations:RainObservations", + - ["dataset-plugin-rain-observations=climetlab_dataset_plugin.rain_observations:RainObservations"] + + ["dataset-plugin-rain-observations=climetlab_dataset_plugin.rain_observations:RainObservations", + "dataset-plugin-rain-forecast =climetlab_dataset_plugin.rain_observations:RainForecast"] @@ -156,4 +156,4 @@ CliMetLab hooks .. todo:: Document .source attribute, to_xarray(), to_pandas(), to_etc() - Point to decorator \ No newline at end of file + Point to decorator diff --git a/docs/contributing/grib.rst b/docs/contributing/grib.rst index 01a60f53..e6418a58 100644 --- a/docs/contributing/grib.rst +++ b/docs/contributing/grib.rst @@ -30,8 +30,8 @@ There are two ways to write GRIB files: - To save data from MARS, CDS or other, when GRIB is already the native format of the data, use the ``source.save(filename)`` method. This method is implemented only on a sources relying on GRIB. -- CliMetLab also supports writing custom GRIB files, with **modified values or custom attributes** -through the function ```cml.new_grib_output()``. See usage example in the example notebook +- CliMetLab also supports writing custom GRIB files, with **modified values or custom attributes** +through the function ```cml.new_grib_output()``. See usage example in the example notebook (:ref:`examples`). @@ -62,13 +62,13 @@ How to build a index for **one** given URL containing a GRIB file ? Then upload the file `large_grib_1.index` and make sure it is available at: "https://get.ecmwf.int/repository/test-data/climetlab/test-data/input/indexed-urls/large_grib_1.index" -This allows accessing the data with +This allows accessing the data with .. code-block:: python cml.load_source("indexed-url", "https://get.ecmwf.int/repository/test-data/climetlab/test-data/input/indexed-urls/large_grib_1.grb" - ) + ) How to build indexes for a set of URLs containing GRIB files ? @@ -85,7 +85,7 @@ Then upload the files `large_grib_1.index` and `large_grib_2.index` and make sur "https://get.ecmwf.int/repository/test-data/climetlab/test-data/input/indexed-urls/large_grib_1.index" "https://get.ecmwf.int/repository/test-data/climetlab/test-data/input/indexed-urls/large_grib_2.index" -This allows accessing the data with +This allows accessing the data with .. code-block:: python @@ -136,4 +136,4 @@ line interface. .. todo:: - Update this when mirror implementation changes. \ No newline at end of file + Update this when mirror implementation changes. diff --git a/docs/contributing/normalize.rst b/docs/contributing/normalize.rst index 8fc2e4db..4d7c0d11 100644 --- a/docs/contributing/normalize.rst +++ b/docs/contributing/normalize.rst @@ -184,4 +184,4 @@ multiple -------- .. todo:: - Add example from tests. 
\ No newline at end of file + Add example from tests. diff --git a/docs/contributing/plotting.rst b/docs/contributing/plotting.rst index 6f237237..87b75767 100644 --- a/docs/contributing/plotting.rst +++ b/docs/contributing/plotting.rst @@ -9,4 +9,3 @@ Plotting .. todo:: Explain how to contribute layers, projections and styles - diff --git a/docs/contributing/sources.rst b/docs/contributing/sources.rst index d1c413c8..d798890f 100644 --- a/docs/contributing/sources.rst +++ b/docs/contributing/sources.rst @@ -33,7 +33,7 @@ integration as follow: ) The package name and the class name should match the class defined in the code -of the plugin: +of the plugin: - **source-name**: is the string that will be used in ``cml.load_source("source-name", ...)`` in oder to trigger the source plugin code. diff --git a/docs/developer/gallery.rst b/docs/developer/gallery.rst index 0c6ab503..d87c1103 100644 --- a/docs/developer/gallery.rst +++ b/docs/developer/gallery.rst @@ -2,4 +2,3 @@ Gallery ======= .. module-output:: generate_gallery_rst - diff --git a/docs/developer/plotting.rst b/docs/developer/plotting.rst index 8bde535c..16dac016 100644 --- a/docs/developer/plotting.rst +++ b/docs/developer/plotting.rst @@ -895,42 +895,42 @@ mmap - | Default * - | **subpage_x_axis_type** - | + | - | "regular", "date", "geoline", "logarithmic" - | "regular" * - | **subpage_y_axis_type** - | + | - | "regular", "date", "geoline", "logarithmic" - | "regular" * - | **x_min** - | + | - | float - | 0.0 * - | **subpage_x_automatic** - | + | - | bool - | False * - | **subpage_y_automatic** - | + | - | bool - | False * - | **x_max** - | + | - | float - | 100.0 * - | **y_min** - | + | - | float - | 0.0 * - | **y_max** - | + | - | float - | 100.0 @@ -1646,5 +1646,3 @@ mtable | Binning information - | bool - | True - - diff --git a/docs/developer/plugins.rst b/docs/developer/plugins.rst index b5e12d5c..0512fae0 100644 --- a/docs/developer/plugins.rst +++ b/docs/developer/plugins.rst @@ -3,20 +3,20 @@ Climetlab Plugin mechanism ========================== -This document discuss how plugins are integrated into CliMetLab. There are two ways to add +This document discuss how plugins are integrated into CliMetLab. There are two ways to add a plugin into CliMetLab: - A Python package using the standard `Python plugin `_ mechanism based on ``entry_points``. This is the generic CliMetLab plugin mechanism. - A YAML file can be also be used to create plugins, when the plugin is simple enough - and used only generic predefined code. + and used only generic predefined code. (currently only for :doc:`dataset plugins `). Plugin as python packages using ``entry_points`` ------------------------------------------------ -During the installation of the pip package, the plugin registers itself thanks to +During the installation of the pip package, the plugin registers itself thanks to the entry points in its setup.py file, making CliMetLab aware of the new capabilities. Then, the user can take advantage of the shared code though the enhanced :py:func:`climetlab.load_dataset()`, :py:func:`climetlab.load_source()` diff --git a/docs/developer/sources.rst b/docs/developer/sources.rst index 3bd59897..69f708a4 100644 --- a/docs/developer/sources.rst +++ b/docs/developer/sources.rst @@ -4,4 +4,3 @@ Data sources .. todo:: Describe what is a class inheriting from :py:class:`cml.Source`. 
- diff --git a/docs/developer/xml2rst.py b/docs/developer/xml2rst.py index 43f9bdd6..496b008d 100755 --- a/docs/developer/xml2rst.py +++ b/docs/developer/xml2rst.py @@ -13,7 +13,8 @@ import os import re import sys -from collections import OrderedDict, defaultdict +from collections import OrderedDict +from collections import defaultdict from textwrap import fill import xmltodict @@ -251,11 +252,7 @@ def _(x): return {"type": ["string", "boolean"], "enum": [_(x) for x in self.values]} if self._param._defs.get("to") in ENUMS: - return { - "$ref": "definitions.json#/definitions/{}".format( - to_snake_case(self._param._defs.get("to")) - ) - } + return {"$ref": "definitions.json#/definitions/{}".format(to_snake_case(self._param._defs.get("to")))} return {"type": "string", "enum": sorted(self.values)} @@ -434,10 +431,7 @@ def load(n): def produce_rst(): - print( - ".. DO NOT EDIT - This page is automatically generated by %s" - % (os.path.basename(sys.argv[0]),) - ) + print(".. DO NOT EDIT - This page is automatically generated by %s" % (os.path.basename(sys.argv[0]),)) print() print("Plotting") print("========") @@ -569,9 +563,7 @@ def produce_schemas(directory): definitions[name] = {"type": "string", "enum": sorted(v["values"].keys())} with open(path + ".tmp", "w") as f: - print( - json.dumps({"definitions": definitions}, sort_keys=True, indent=4), file=f - ) + print(json.dumps({"definitions": definitions}, sort_keys=True, indent=4), file=f) os.rename(path + ".tmp", path) for action, klasses in sorted(ACTIONS.items()): diff --git a/docs/examples/02-source-url.ipynb b/docs/examples/02-source-url.ipynb index 6e507a7f..f7bee3c2 100644 --- a/docs/examples/02-source-url.ipynb +++ b/docs/examples/02-source-url.ipynb @@ -115,4 +115,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/docs/examples/03-source-cds.ipynb b/docs/examples/03-source-cds.ipynb index c5d2c252..771ef257 100644 --- a/docs/examples/03-source-cds.ipynb +++ b/docs/examples/03-source-cds.ipynb @@ -194,4 +194,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/docs/examples/04-source-mars.ipynb b/docs/examples/04-source-mars.ipynb index fbd6bd52..8d947833 100644 --- a/docs/examples/04-source-mars.ipynb +++ b/docs/examples/04-source-mars.ipynb @@ -133,4 +133,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/docs/examples/09-mars-odb.ipynb b/docs/examples/09-mars-odb.ipynb index 6d994d41..05ace7a8 100644 --- a/docs/examples/09-mars-odb.ipynb +++ b/docs/examples/09-mars-odb.ipynb @@ -498,4 +498,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/docs/examples/13-icoads.ipynb b/docs/examples/13-icoads.ipynb index be572c85..b767a5c8 100644 --- a/docs/examples/13-icoads.ipynb +++ b/docs/examples/13-icoads.ipynb @@ -182,4 +182,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/docs/examples/15-more-plotting.ipynb b/docs/examples/15-more-plotting.ipynb index 3dbe2af3..dd05fe68 100644 --- a/docs/examples/15-more-plotting.ipynb +++ b/docs/examples/15-more-plotting.ipynb @@ -760,4 +760,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/docs/examples/renumber.sh b/docs/examples/renumber.sh index 4b351551..c1099236 100755 --- a/docs/examples/renumber.sh +++ b/docs/examples/renumber.sh @@ -2,7 +2,7 @@ for n in $(seq $1 89) do - m=$((n+1)) + m=$((n+1)) f1=$(printf '%02d-' $n) f2=$(printf '%02d-' $m) p=$(ls $f1*.ipynb 
2>/dev/null) diff --git a/docs/guide/caching.rst b/docs/guide/caching.rst index bc3182aa..13015928 100644 --- a/docs/guide/caching.rst +++ b/docs/guide/caching.rst @@ -27,21 +27,21 @@ CliMetLab cache configuration is managed through the CliMetLab :doc:`settings`. users working is a different use case and should be supported through using mirrors. `Feedback and feature requests are welcome. `_ - + .. _cache_location: Cache location -------------- The cache location is defined by the ``cache‑directory`` setting. Its default - value depends on your system: + value depends on your system: - ``/tmp/climetlab-$USER`` for Linux, - ``C:\\Users\\$USER\\AppData\\Local\\Temp\\climetlab-$USER`` for Windows - ``/tmp/.../climetlab-$USER`` for MacOS - + The cache location can be read and modified either with shell command or within python. - + .. note:: It is recommended to restart your Jupyter kernels after changing @@ -78,7 +78,7 @@ Cache location >>> cml.settings.get("cache-directory") # Cache directory has been modified /big-disk/climetlab-cache - More generally, the CliMetLab settings can be read, modified, reset + More generally, the CliMetLab settings can be read, modified, reset to their default values using the ``climetlab`` command or from python, see the :doc:`Settings documentation `. diff --git a/docs/guide/cmdline.rst b/docs/guide/cmdline.rst index e781c060..e7f744a6 100644 --- a/docs/guide/cmdline.rst +++ b/docs/guide/cmdline.rst @@ -32,4 +32,3 @@ prompt. To exit the interactive prompt use Control+D. .. module-output:: generate_cmdline_help - diff --git a/docs/guide/dask.rst b/docs/guide/dask.rst index 14fdf4f9..ef3dbfea 100644 --- a/docs/guide/dask.rst +++ b/docs/guide/dask.rst @@ -67,7 +67,7 @@ Access the dask logs .. todo:: Not implemented yet. -Stop the dask cluster +Stop the dask cluster ********************* The dask cluster and client will usually stop automatically when the python process ends. @@ -101,7 +101,7 @@ Create the yaml file $HOME/.climetlab/dask/hpc-name-config-1.yaml. Then use it w .. note:: For HPC system admin: - Adding yaml files in /opt/climetlab/dask/*.yaml will give global access to all users. + Adding yaml files in /opt/climetlab/dask/*.yaml will give global access to all users. Reuse the dask client @@ -125,4 +125,4 @@ Scale the dask cluster from climetlab.utils.dask import start deploy = start('slurm') - deploy.scale(..) \ No newline at end of file + deploy.scale(..) diff --git a/docs/guide/data_handling.rst b/docs/guide/data_handling.rst index 21ffcc00..58d24a60 100644 --- a/docs/guide/data_handling.rst +++ b/docs/guide/data_handling.rst @@ -275,4 +275,4 @@ Merging Data sources bar = ["a", "b"], qux = "unique" merger = MyMerger() - ) \ No newline at end of file + ) diff --git a/docs/guide/datasets.rst b/docs/guide/datasets.rst index 9013a562..980bbba2 100644 --- a/docs/guide/datasets.rst +++ b/docs/guide/datasets.rst @@ -17,7 +17,7 @@ It also provides **metadata** and **additional functionalities**. or other. - Relevant **metadata** are attached directly to the dataset to provides - additional information such as :ref:`an URL, a citation, licence, etc. ` + additional information such as :ref:`an URL, a citation, licence, etc. ` - **Additional functionalities**: When working on data, we are often writing code to transform, preprocess, @@ -33,7 +33,7 @@ It also provides **metadata** and **additional functionalities**. .. 
note:: - :ref:`Dataset ` objects differ from data :ref:`Source ` objects, + :ref:`Dataset ` objects differ from data :ref:`Source ` objects, as Datasets refer to a given set of data (such as "the 2m temperature on Europe in 2015", while Sources are more generic such as "url"). @@ -68,7 +68,7 @@ Other arguments are defined by the plugin maintainer and are documented in the plugin documentation (see :doc:`/guide/pluginlist`). The Dataset object provides methods to access and use its data such as -``to_xarray()`` or ``to_pandas()`` or ``to_numpy()`` (there are other +``to_xarray()`` or ``to_pandas()`` or ``to_numpy()`` (there are other :ref:`methods that can be used to access data ` from a Dataset). .. code-block:: python @@ -188,4 +188,4 @@ There is no need to import the plugin package to enable loading the dataset: .. code-block:: python - >>> import climetlab_demo_dataset # Not needed \ No newline at end of file + >>> import climetlab_demo_dataset # Not needed diff --git a/docs/guide/howtos.rst b/docs/guide/howtos.rst index 4f3e8ba9..87fd0ae3 100644 --- a/docs/guide/howtos.rst +++ b/docs/guide/howtos.rst @@ -58,4 +58,4 @@ How to share my cache directory with another user ? Can CliMetLab help using dask ? ------------------------------- - See :doc:`/guide/dask`. \ No newline at end of file + See :doc:`/guide/dask`. diff --git a/docs/guide/pluginlist.rst b/docs/guide/pluginlist.rst index b59d16a8..b4dbfcaa 100644 --- a/docs/guide/pluginlist.rst +++ b/docs/guide/pluginlist.rst @@ -5,7 +5,7 @@ List of CliMetLab plugins .. note:: - This list is **not exhaustive**. + This list is **not exhaustive**. Some plugins are not listed here because we are not aware of them or because they are for internal use only, or not ready to be shared. @@ -47,34 +47,34 @@ in ``cml.load_dataset``. Tropical cyclones. In progress. Datasets provided: ``tc-*`` - + - `climetlab-maelstrom-yr `_: - Alpha. Gridded weather data for the Nordics, designed for ML postprocessing. Part of the `MAELSTROM `_ project. + Alpha. Gridded weather data for the Nordics, designed for ML postprocessing. Part of the `MAELSTROM `_ project. Dataset provided: ``maelstom-yr`` - + - `climetlab-maelstrom-nogwd `_: - Alpha. Dataset for learning non-orographic gravity wave parametrization. Part of the `MAELSTROM `_ project. + Alpha. Dataset for learning non-orographic gravity wave parametrization. Part of the `MAELSTROM `_ project. Dataset provided: ``maelstom-nogwd`` - + - `climetlab-maelstrom-radiation `_: - Alpha. Dataset for learning radiative heating parametrization. Part of the `MAELSTROM `_ project. + Alpha. Dataset for learning radiative heating parametrization. Part of the `MAELSTROM `_ project. Datasets provided: ``maelstom-radiation``, ``maelstom-radiation-tf`` - + - `climetlab-maelstrom-ens10 `_: - Alpha. Dataset for testing ensemble postprocessing techniques. Part of the `MAELSTROM `_ project. + Alpha. Dataset for testing ensemble postprocessing techniques. Part of the `MAELSTROM `_ project. Datasets provided: ``maelstrom-ens5mini``, ``maelstrom-ens10`` - `climetlab-maelstrom-downscaling `_: - Alpha. Dataset for testing downscaling techniques. Part of the `MAELSTROM `_ project. + Alpha. Dataset for testing downscaling techniques. Part of the `MAELSTROM `_ project. Dataset provided: ``maelstrom-downscaling`` - `climetlab-maelstrom-power-production `_: - Alpha. Dataset for predicting wind farm power production from weather data. Part of the `MAELSTROM `_ project. + Alpha. 
Dataset for predicting wind farm power production from weather data. Part of the `MAELSTROM `_ project. Datasets provided: ``maelstrom-power-production``, ``maelstrom-weather-model-level``, ``maelstrom-weather-pressure-level``, ``maelstrom-weather-surface-level``, ``maelstrom-constants-a-b`` @@ -109,7 +109,7 @@ Installing a :doc:`source ` plugin, allows using an additional s in ``cml.load_source``. - `google-drive `_ - |climetlab-google-drive-source-build-status| + |climetlab-google-drive-source-build-status| Access public files in Google Drive with ``cml.load_source("google-drive", file_id="...")`` @@ -120,7 +120,7 @@ in ``cml.load_source``. - `climetlab-demo-source `_ - |climetlab-demo-source-build-status| + |climetlab-demo-source-build-status| Demo plugin to illustrate to source plugin mechanism. @@ -133,7 +133,7 @@ Drafts Source Plugins ~~~~~~~~~~~~~~~~~~~~~ - `stvl `_ - |climetlab-stvl-build-status| + |climetlab-stvl-build-status| Access data the STVL database with ``cml.load_source("stvl", ...)`` @@ -141,5 +141,3 @@ Drafts Source Plugins .. |climetlab-stvl-build-status| image:: https://github.com/ecmwf-lab/climetlab-stvl/actions/workflows/check-and-publish.yml/badge.svg :alt: build status :target: https://github.com/ecmwf-lab/climetlab-stvl/actions/workflows/check-and-publish.yml - - diff --git a/docs/guide/settings.rst b/docs/guide/settings.rst index 045281e1..05b75a67 100644 --- a/docs/guide/settings.rst +++ b/docs/guide/settings.rst @@ -87,7 +87,7 @@ Or using the ``climetlab`` interactive prompt: .. _settings_table: - + Default values -------------- diff --git a/docs/guide/sources.rst b/docs/guide/sources.rst index ffad4c7b..37c77203 100644 --- a/docs/guide/sources.rst +++ b/docs/guide/sources.rst @@ -26,7 +26,7 @@ A Source also provides metadata and additional functionalities: >>> source = cml.load_source(name, *args, **kwargs) The Source object provides methods to access and use its data such as -``to_xarray()`` or ``to_pandas()`` or ``to_numpy()`` (there are other +``to_xarray()`` or ``to_pandas()`` or ``to_numpy()`` (there are other :ref:`methods that can be used to access data ` from a data Source). .. code-block:: python @@ -38,7 +38,7 @@ The Source object provides methods to access and use its data such as .. note:: - :ref:`Source ` objects differ from data :ref:`Dataset ` objects, + :ref:`Source ` objects differ from data :ref:`Dataset ` objects, as Datasets refer to a given set of data (such as "the 2m temperature on Europe in 2015", while Sources are more generic such as "url"). @@ -72,7 +72,7 @@ The simplest data source is the ``"file"`` source that accesses a local file. >>> import climetlab as cml >>> data = cml.load_source("file", "path/to/file") >>> data.to_xarray() # for gridded data fields - >>> data.to_pandas() # for non-gridded data + >>> data.to_pandas() # for non-gridded data *CliMetLab* will inspect the content of the file to check for any of the supported data formats listed below: @@ -178,9 +178,9 @@ If the format is not supported, additional code can be included in CliMetLab to url --- -The ``"url"`` data source is very similar to the ``"file"`` source. +The ``"url"`` data source is very similar to the ``"file"`` source. -This sources downloads the data from the specified address and stores it in the :ref:`cache `, +This sources downloads the data from the specified address and stores it in the :ref:`cache `, then it operates similarly to the :ref:`"file" source ` above. The supported data formats are the same as for the :ref:`"file" source `. 
@@ -315,7 +315,7 @@ are also publicly available. To access data from the MARS, you will need to register and retrieve an access token. For a more extensive documentation about MARS, please refer to the `MARS documentation `_ (or its -`access from the internet `_ through its +`access from the internet `_ through its `web API `_). @@ -379,7 +379,7 @@ Example >>> def only_csv(path): return path.endswith(".csv") >>> source = cml.load_source("zenodo", record_id=5020468, filter=only_csv) - >>> source.to_pandas() + >>> source.to_pandas() .. note:: diff --git a/docs/installing.rst b/docs/installing.rst index f89249f0..637b6821 100644 --- a/docs/installing.rst +++ b/docs/installing.rst @@ -70,7 +70,7 @@ account has not been verified. .. code-block:: html - WARNING: Retrying (Retry(total=4, connect=None, read=None, redirect=None, status=None)) + WARNING: Retrying (Retry(total=4, connect=None, read=None, redirect=None, status=None)) after connection broken by 'NewConnectionError(': Failed to establish a new connection: [Errno -3] Temporary failure in name resolution')':.... diff --git a/experiments/bias.py b/experiments/bias.py index 64962556..25235247 100644 --- a/experiments/bias.py +++ b/experiments/bias.py @@ -73,18 +73,17 @@ def build_model(shape_in, shape_out): - from tensorflow.keras.layers import Dense, Flatten, Input, Reshape + from tensorflow.keras.layers import Dense + from tensorflow.keras.layers import Flatten + from tensorflow.keras.layers import Input + from tensorflow.keras.layers import Reshape from tensorflow.keras.models import Sequential model = Sequential(name="ML_model") model.add(Input(shape=(shape_in[-3], shape_in[-2], shape_in[-1]))) model.add(Flatten()) model.add(Dense(shape_out[-3] * shape_out[-2] * shape_out[-1])) - model.add( - Reshape( - target_shape=(shape_out[-3], shape_out[-2], shape_out[-1]), name="output" - ) - ) + model.add(Reshape(target_shape=(shape_out[-3], shape_out[-2], shape_out[-1]), name="output")) model.summary() diff --git a/experiments/era5-vs-oper.py b/experiments/era5-vs-oper.py index 647b86ea..7bd6d680 100644 --- a/experiments/era5-vs-oper.py +++ b/experiments/era5-vs-oper.py @@ -1,5 +1,8 @@ # flake8: noqa -from tensorflow.keras.layers import Dense, Flatten, Input, Reshape +from tensorflow.keras.layers import Dense +from tensorflow.keras.layers import Flatten +from tensorflow.keras.layers import Input +from tensorflow.keras.layers import Reshape from tensorflow.keras.models import Sequential import climetlab as cml diff --git a/experiments/gaussian.py b/experiments/gaussian.py index eb6337e6..fd0856fc 100644 --- a/experiments/gaussian.py +++ b/experiments/gaussian.py @@ -11,7 +11,8 @@ import time import climetlab as cml -from climetlab.grids import lookup, unstructed_to_structed +from climetlab.grids import lookup +from climetlab.grids import unstructed_to_structed ds = cml.load_source("mars", param="2t", date=20220907, levtype="sfc") tree = unstructed_to_structed(ds[0], 15) diff --git a/experiments/io/io.py b/experiments/io/io.py index 324f1728..76985a20 100644 --- a/experiments/io/io.py +++ b/experiments/io/io.py @@ -79,9 +79,7 @@ def process(self, f): def write_log(self): logdir = self.LOGDIR os.makedirs(logdir, exist_ok=True) - with open( - f"{logdir}/{self.prefix}io.{self.seek}.{self.chunksize}.csv", "a" - ) as f: + with open(f"{logdir}/{self.prefix}io.{self.seek}.{self.chunksize}.csv", "a") as f: print(f"{self.seek}, {self.chunksize}, {self.tic.total}", file=f) diff --git a/experiments/t_draft.py b/experiments/t_draft.py index ca350312..daac4bd7 
100644 --- a/experiments/t_draft.py +++ b/experiments/t_draft.py @@ -1,27 +1,21 @@ -import itertools -import math -import time - import numpy as np -from tqdm import tqdm import climetlab as cml # cmlds = cml.load_source("indexed-directory", "/lus/h2resw01/fws4/lb/project/ai-ml/test") cmlds = cml.load_source( "indexed-directory", - "/lus/h2resw01/fws4/lb/project/ai-ml/era5-for-ai" + "/lus/h2resw01/fws4/lb/project/ai-ml/era5-for-ai", # datetime=['date', 'time'] # datetime=['valid_date + valid_time'] ) cmlds = cmlds.sel(date=[19790501, 19790502]) # cmlds = cml.load_source("indexed-directory", "testdir") -ds = cmlds.sel(levtype='pl') +ds = cmlds.sel(levtype="pl") print(ds) -from climetlab.indexing.cube import Cubelet, FieldCube print() cube = ds.cube("date", "time", "param", "levelist") diff --git a/experiments/test-cubes2.py b/experiments/test-cubes2.py index 4484243e..4b69dd31 100644 --- a/experiments/test-cubes2.py +++ b/experiments/test-cubes2.py @@ -1,7 +1,7 @@ import os -import climetlab as cml -from climetlab.loaders import HDF5Loader, ZarrLoader, load +from climetlab.loaders import ZarrLoader +from climetlab.loaders import load load( ZarrLoader("out.zarr"), diff --git a/experiments/transposition.py b/experiments/transposition.py index 833ec600..08bddee1 100644 --- a/experiments/transposition.py +++ b/experiments/transposition.py @@ -287,15 +287,11 @@ def __init__( print(f"shape_i = {self.shape_i} values on each field (dim i)") print(f"shape_j = {self.shape_j} fields (dim j)") - print( - f"Total shape = {self.shape[0]} x {self.shape[1]} = {self.shape[0]*self.shape[1]}" - ) + print(f"Total shape = {self.shape[0]} x {self.shape[1]} = {self.shape[0]*self.shape[1]}") n_expected_fields = math.prod([len(v) for k, v in self.coords_j.items()]) if n_expected_fields != len(self.source): - raise ValueError( - f"Expecting {n_expected_fields} fields but got {len(self.source)}." 
- ) + raise ValueError(f"Expecting {n_expected_fields} fields but got {len(self.source)}.") def infer_shape_i(self): return math.prod(self.read_one_field(0).to_numpy().shape) @@ -329,14 +325,10 @@ def run(self): threading.Thread(target=worker(self.field_queue), daemon=True).start() for i in range(0, self.nthreads_write): - threading.Thread( - target=worker_on_file(self.block_queue, self.filename), daemon=True - ).start() + threading.Thread(target=worker_on_file(self.block_queue, self.filename), daemon=True).start() for i in range(0, self.nthreads_ready): - threading.Thread( - target=worker(self.ready_batch_of_fields_queue), daemon=True - ).start() + threading.Thread(target=worker(self.ready_batch_of_fields_queue), daemon=True).start() all_bofs = AllBatchesOfFields( self.ready_batch_of_fields_queue, @@ -406,9 +398,7 @@ def __init__(self, filename): print( f"Reading file {self.filename}, expecting {self.shape} = {math.prod(self.shape) * np.dtype(self.dtype).itemsize} bytes" ) - self.array = np.memmap( - self.filename, dtype=np.dtype(self.dtype), mode="r", shape=self.shape - ) + self.array = np.memmap(self.filename, dtype=np.dtype(self.dtype), mode="r", shape=self.shape) def to_xarray(self): import xarray as xr @@ -460,9 +450,7 @@ def test1(): def test2(): source = FakeSource(18, shape=(24)) - t = WritterClass( - source, filename="transpose.2.bin", n_gridpoints=200, n_features=10 - ) + t = WritterClass(source, filename="transpose.2.bin", n_gridpoints=200, n_features=10) t.run() r = TimeseriesReader(filename="transpose.2.bin") xds = r.to_xarray() @@ -479,9 +467,7 @@ def test3(): def test4(): source = FakeSource(6, 5, shape=(24, 36)) - t = WritterClass( - source, filename="transpose.4.bin", n_gridpoints=200, n_features=50 - ) + t = WritterClass(source, filename="transpose.4.bin", n_gridpoints=200, n_features=50) t.run() r = TimeseriesReader(filename="transpose.4.bin") xds = r.to_xarray() diff --git a/experiments/virtual2.py b/experiments/virtual2.py index a57f01e3..c3484bba 100644 --- a/experiments/virtual2.py +++ b/experiments/virtual2.py @@ -1,7 +1,10 @@ # flake8: noqa import tensorflow as tf from keras import backend as K -from tensorflow.keras.layers import Dense, Flatten, Input, Reshape +from tensorflow.keras.layers import Dense +from tensorflow.keras.layers import Flatten +from tensorflow.keras.layers import Input +from tensorflow.keras.layers import Reshape from tensorflow.keras.models import Sequential import climetlab as cml @@ -30,9 +33,7 @@ def dataset(ds): options = tf.data.Options() options.threading.private_threadpool_size = 10 options.deterministic = False - options.experimental_distribute.auto_shard_policy = ( - tf.data.experimental.AutoShardPolicy.DATA - ) + options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.DATA return ( ds.to_tfdataset() diff --git a/old-tests/indexing/_test_indexing_availability.py b/old-tests/indexing/_test_indexing_availability.py deleted file mode 100644 index 6bfab721..00000000 --- a/old-tests/indexing/_test_indexing_availability.py +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env python3 - -# (C) Copyright 2020 ECMWF. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. 
-# - -import os - -import climetlab as cml - -TEST_DIR = os.path.join(os.path.dirname(__file__), "test_indexing_tmpdir") - - -def _test_directory_source_availability(): - params = ["z", "t"] - levels = [500, 850] - ds = cml.load_source( - "indexed-directory", - TEST_DIR, - level=levels, - variable=params, - date=20220929, - time="1200", - ) - - # for i in ds.index.db.dump_dicts(): - - print(len(ds)) - print(ds.availability) - - -if __name__ == "__main__": - from climetlab.testing import main - - main(__file__) - # test_directory_source_with_none_2( - # test_directory_source_with_none_1( - # test_directory_source_availability() - # test_directory_source_order_with_order_by_method_1( - # ["z", "t"], - # [500, 850], - # ) diff --git a/old-tests/indexing/_test_indexing_order_by.py b/old-tests/indexing/_test_indexing_order_by.py deleted file mode 100644 index 321b6607..00000000 --- a/old-tests/indexing/_test_indexing_order_by.py +++ /dev/null @@ -1,177 +0,0 @@ -#!/usr/bin/env python3 - -# (C) Copyright 2020 ECMWF. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. -# - -import os -import sys - -import pytest - -here = os.path.dirname(__file__) -sys.path.insert(0, here) -from indexing_fixtures import check_sel_and_order, get_fixtures # noqa: E402 - - -@pytest.mark.parametrize("params", (["t", "u"], ["u", "t"])) -@pytest.mark.parametrize("levels", ([500, 850], [850, 500])) -# @pytest.mark.parametrize("source_name", ["indexed-directory", "list-of-dicts", "file"]) -@pytest.mark.parametrize("source_name", ["indexed-directory"]) -def test_indexing_order_by_with_request(params, levels, source_name): - request = dict( - level=levels, - variable=params, - time="1200", - ) - - ds, _, total, n = get_fixtures(source_name, request) - - for i in ds: - print(i) - assert len(ds) == 4, len(ds) - - check_sel_and_order(ds, params, levels) - - -@pytest.mark.parametrize("params", (["t", "u"], ["u", "t"])) -@pytest.mark.parametrize("levels", ([500, 850], [850, 500])) -# @pytest.mark.parametrize("source_name", ["indexed-directory", "list-of-dicts"]) -@pytest.mark.parametrize("source_name", ["indexed-directory"]) -def test_indexing_order_by_with_keyword(params, levels, source_name): - request = dict(variable=params, level=levels, date=20220929, time="1200") - request["order_by"] = dict(level=levels, variable=params) - - ds, _, total, n = get_fixtures(source_name, request) - - assert len(ds) == n, len(ds) - - check_sel_and_order(ds, params, levels) - - -@pytest.mark.parametrize("params", (["t", "u"],)) -@pytest.mark.parametrize("levels", ([500, 850],)) -@pytest.mark.parametrize("source_name", ["indexed-directory", "file"]) -def test_indexing_order_by_with_method_with_list(params, levels, source_name): - request = dict(variable=params, level=levels, date=20220929, time="1200") - order_by = ["level", "variable"] - - ds, _, total, n = get_fixtures(source_name, {}) - - assert len(ds) == total, len(ds) - - ds = ds.sel(**request) - assert len(ds) == n, len(ds) - - ds = ds.order_by(order_by) - assert len(ds) == n - - check_sel_and_order(ds, params, levels) - - -@pytest.mark.parametrize("params", (["t", "u"], ["u", "t"])) -@pytest.mark.parametrize("levels", ([500, 850], [850, 500])) -# @pytest.mark.parametrize("source_name", 
["indexed-directory", "list-of-dicts", "file"]) -@pytest.mark.parametrize("source_name", ["indexed-directory", "file"]) -def test_indexing_order_by_with_method(params, levels, source_name): - request = dict(variable=params, level=levels, date=20220929, time="1200") - order_by = dict(level=levels, variable=params) - - ds, _, total, n = get_fixtures(source_name, {}) - - print(ds) - print() - for i in ds: - print(i) - assert len(ds) == total, len(ds) - - ds = ds.sel(**request) - assert len(ds) == n, len(ds) - - ds = ds.order_by(order_by) - assert len(ds) == n - - check_sel_and_order(ds, params, levels) - - # keys = list(ds.coords.keys()) - # assert keys == ["levelist", "param"], keys - # coords_params = list(ds.coords["param"]) - # coords_levels = list(ds.coords["levelist"]) - # coords_levels = [int(x) for x in coords_levels] - # assert coords_params == params, (coords_params, params) - # assert coords_levels == levels, (coords_levels, levels) - - -@pytest.mark.parametrize("params", (["t", "u"], ["u", "t"])) -@pytest.mark.parametrize( - "levels", ([500, 850], [850, 500], ["500", "850"], ["850", "500"]) -) -# @pytest.mark.parametrize("source_name", ["indexed-directory", "list-of-dicts", "file"]) -@pytest.mark.parametrize("source_name", ["indexed-directory"]) -def test_indexing_order_ascending_descending(params, levels, source_name): - request = dict(variable=params, level=levels, date=20220929, time="1200") - order_by = dict(level="descending", variable="ascending") - - ds, _, total, n = get_fixtures(source_name, {}) - - ds = ds.sel(**request) - assert len(ds) == 4, len(ds) - - ds = ds.order_by(order_by) - assert len(ds) == 4 - - assert ds[0].metadata("param") == min(params) - assert ds[1].metadata("param") == max(params) - assert ds[2].metadata("param") == min(params) - assert ds[3].metadata("param") == max(params) - - assert int(ds[0].metadata("level")) == max([int(x) for x in levels]) - assert int(ds[1].metadata("level")) == max([int(x) for x in levels]) - assert int(ds[2].metadata("level")) == min([int(x) for x in levels]) - assert int(ds[3].metadata("level")) == min([int(x) for x in levels]) - print() - - -# Index files have been created with : -# export BASEURL=https://object-store.os-api.cci1.ecmwf.int/climetlab/test-data/input/indexed-urls -# climetlab index_gribs $BASEURL/large_grib_1.grb > large_grib_1.grb.index -# climetlab index_gribs $BASEURL/large_grib_2.grb > large_grib_2.grb.index -# climetlab index_gribs large_grib_1.grb large_grib_2.grb --baseurl $BASEURL > global_index.index - -REQUEST_1 = { - "domain": "g", - "levtype": "pl", - "levelist": "850", - "date": "19970228", - "time": "2300", - "step": "0", - "param": "r", - "class": "ea", - "type": "an", - "stream": "oper", - "expver": "0001", - # - "n": ["1", "2"], -} -# source = load_source( -# "indexed-urls", -# baseurl + "/test-data/input/indexed-urls/large_grib_{n}.grb", -# REQUEST_1, -# ) - -if __name__ == "__main__": - from climetlab.testing import main - - # test_indexing_order_by_with_request(["u", "t"], [500, 850], "list-of-dicts") - # test_indexing_order_by_with_method(["u", "t"], [500, 850], "file") - # test_indexing_order_by_with_method(["u", "t"], [500, 850], "indexed-directory") - # test_indexing_order_ascending_descending(["t", "u"], [500, 850], 'file') - # test_indexing_order_by_with_method_with_list(["t", "u"], [850, 500], 'indexed-directory') - # test_indexing_order_by_with_method(["t", "u"], [500, 850], 'indexed-directory') - - main(__file__) diff --git a/old-tests/indexing/_test_indexing_save.py 
b/old-tests/indexing/_test_indexing_save.py deleted file mode 100644 index 8b4d2854..00000000 --- a/old-tests/indexing/_test_indexing_save.py +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env python3 - -# (C) Copyright 2020 ECMWF. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. -# - -import os -import sys - -import pytest - -import climetlab as cml -from climetlab.core.temporary import temp_file - -here = os.path.dirname(__file__) -sys.path.insert(0, here) -from indexing_fixtures import check_sel_and_order, get_fixtures # noqa: E402 - - -@pytest.mark.skipif(sys.platform == "win32", reason="Cannot unlink tmp file on Windows") -@pytest.mark.parametrize("params", (["t", "u"], ["u", "t"])) -@pytest.mark.parametrize("levels", ([500, 850], [850, 500])) -@pytest.mark.parametrize( - "source_name", ["indexed-directory", "file", "indexed-url", "indexed-urls"] -) -def test_indexing_save(params, levels, source_name): - request = dict( - level=levels, - variable=params, - date=20220929, - time="1200", - ) - if ( - source_name == "indexed-url" or source_name == "indexed-urls" - ): # TODO: make all test data consistent - request["date"] = "19970101" - request["time"] = [1100, 1200] - - ds, _, total, n = get_fixtures(source_name, {}) - assert len(ds) == total, len(ds) - - ds = ds.sel(**request) - assert len(ds) == n, len(ds) - - ds = ds.order_by(level=levels, variable=params) - assert len(ds) == n, len(ds) - - if not ( - source_name == "indexed-url" or source_name == "indexed-urls" - ): # TODO: make all test data consistent - check_sel_and_order(ds, params, levels) - - with temp_file() as filename: - ds.save(filename) - ds = cml.load_source("file", filename) - - assert len(ds) == n - if not ( - source_name == "indexed-url" or source_name == "indexed-urls" - ): # TODO: make all test data consistent - check_sel_and_order(ds, params, levels) - - -if __name__ == "__main__": - from climetlab.testing import main - - # test_indexing_save(["t", "u"], [500, 850], "indexed-url") - - main(__file__) diff --git a/old-tests/indexing/_test_indexing_sel_extended.py b/old-tests/indexing/_test_indexing_sel_extended.py deleted file mode 100644 index e521cdd2..00000000 --- a/old-tests/indexing/_test_indexing_sel_extended.py +++ /dev/null @@ -1,178 +0,0 @@ -#!/usr/bin/env python3 - -# (C) Copyright 2020 ECMWF. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. 
-# - - -import pytest - -from climetlab import load_source -from climetlab.indexing import PerUrlIndex - -CML_BASEURL_S3 = "https://object-store.os-api.cci1.ecmwf.int/climetlab" -CML_BASEURL_CDS = "https://datastore.copernicus-climate.eu/climetlab" -CML_BASEURL_GET = "https://get.ecmwf.int/repository/test-data/climetlab" -CML_BASEURLS = [CML_BASEURL_S3, CML_BASEURL_GET, CML_BASEURL_CDS] - - -def check(ds, i, ref): - field = ds[i] - n = field.to_numpy() - mean = n.mean() - assert abs(mean - ref) < 1e-6, (mean, ref, field) - - -def retrieve_and_check(index, request, range_method=None, **kwargs): - print("--------") - print("range_method", range_method) - print("REQUEST", request) - - s = load_source( # noqa F841 - "indexed-urls", - index, - request, - range_method=range_method, - **kwargs, - ) - - # check that the downloaded gribs match the request - for grib in s: - for k, v in request.items(): - if k == "param": - k = "shortName" - assert check_grib_value(grib._get(k), v), (grib._get(k), v) - - -def check_grib_value(value, requested): - if isinstance(requested, (list, tuple)): - return any([check_grib_value(value, _v) for _v in requested]) - else: - try: - return int(value) == int(requested) - except (TypeError, ValueError): - return str(value) == str(requested) - - -@pytest.mark.long_test -@pytest.mark.parametrize("baseurl", CML_BASEURLS) -def test_per_url_index(baseurl): - index = PerUrlIndex( - f"{baseurl}/test-data/input/indexed-urls/large_grib_1.grb", - ) - request = dict(param="r", time="1000", date="19970101") - retrieve_and_check(index, request) - - -@pytest.mark.long_test -# @pytest.mark.parametrize("baseurl", CML_BASEURLS) -def test_per_url_index_2(): - baseurl = CML_BASEURL_S3 - index = PerUrlIndex( - f"{baseurl}/test-data/big.grib", - ) - request = dict(param="cin", date="20211125", step="6", number=["1", "3"]) - retrieve_and_check(index, request) - - -def dev(): - baseurl = CML_BASEURL_S3 - - index = PerUrlIndex( - f"{baseurl}/test-data/input/indexed-urls/large_grib_1.grb", - ) - - request = dict(param="r") - retrieve_and_check(index, request) - - request = dict(param="r", time="1000") - retrieve_and_check(index, request) - - request = dict(date="19970101") - retrieve_and_check(index, request) - - request = dict(param="r", time="1000", date="19970101") - retrieve_and_check(index, request) - - -def timing(): - baseurl = CML_BASEURL_S3 - baseurl = CML_BASEURL_CDS - index = PerUrlIndex( - f"{baseurl}/test-data/input/indexed-urls/large_grib_1.grb", - ) - - sizes = ["sharp(1,1)", "auto", "cluster"] - sizes = [] - for r in range(11, 24): # from 2k to 8M - sizes.append(f"blocked({2 ** r})") - - report = {} - for request in [ - dict(param="r"), - dict(param="r", time="1000"), - dict(date="19970101"), - dict(param="r", time="1000", date="19970101"), - ]: - times = [] - for n in sizes: - try: - elapsed = retrieve_and_check(index, request, range_method=n, force=True) - except Exception as e: - print(e) - times.append(-1) - continue - if n is None: - n = 0 - if n == "auto": - n = -1 - if n == "cluster": - n = 1 - if n == "sharp": - n = -2 - times.append((round(elapsed * 10) / 10.0, n)) - - report[tuple(request.items())] = request, sorted(times) - - for k, v in report.items(): - print(k) - print(v) - - -@pytest.mark.long_test -def test_grib_index_eumetnet(): - request = { - "param": "2ti", - "date": "20171228", - "step": ["0-24", "24-48", "48-72", "72-96", "96-120", "120-144", "144-168"], - # Parameters passed to the filename mangling - "url": 
"https://object-store.os-api.cci1.ecmwf.int/eumetnet-postprocessing-benchmark-training-dataset/", - "month": "12", - "year": "2017", - } - PATTERN = "{url}data/fcs/efi/EU_forecast_efi_params_{year}-{month}_0.grb" - ds = load_source("indexed-urls", PerUrlIndex(PATTERN), request) - assert len(ds) == 7, len(ds) - check(ds, 0, -0.16334878510300832) - check(ds, 1, -0.06413754859021915) - check(ds, 2, 0.23404628380396034) - check(ds, 3, 0.3207112379535552) - xds = ds.to_xarray() - print(xds) - - -if __name__ == "__main__": - from climetlab.testing import main - - main(__file__) - # test_per_url_index(CML_BASEURL_S3) - # test_indexed_s3(CML_BASEURL_S3) - # timing() - # from climetlab.testing import main - - # test_grib_index_eumetnet() diff --git a/old-tests/indexing/_test_indexing_serialisation.py b/old-tests/indexing/_test_indexing_serialisation.py deleted file mode 100644 index 00e55f0c..00000000 --- a/old-tests/indexing/_test_indexing_serialisation.py +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env python3 - -# (C) Copyright 2020 ECMWF. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. -# - -import os -import sys - -import pytest - -from climetlab.utils.serialise import SERIALISATION, deserialise_state, serialise_state - -here = os.path.dirname(__file__) -sys.path.insert(0, here) -from indexing_fixtures import check_sel_and_order, get_fixtures # noqa: E402 - - -@pytest.mark.parametrize("params", (["t", "u"], ["u", "t"])) -@pytest.mark.parametrize("levels", ([500, 850], [850, 500])) -@pytest.mark.parametrize( - "source_name", - [ - "indexed-directory", - # "list-of-dicts", - # "file", - ], -) -def test_indexing_pickle(params, levels, source_name): - request = dict( - level=levels, - variable=params, - date=20220929, - time="1200", - ) - - ds, __tmp, total, n = get_fixtures(source_name, {}) - assert len(ds) == total, len(ds) - - ds = ds.sel(**request) - ds = ds.order_by(level=levels, variable=params) - check_sel_and_order(ds, params, levels) - - assert len(ds) == n, (len(ds), ds, SERIALISATION) - state = serialise_state(ds) - ds = deserialise_state(state) - assert len(ds) == n, (len(ds), ds, SERIALISATION) - - check_sel_and_order(ds, params, levels) - - -if __name__ == "__main__": - from climetlab.testing import main - - main(__file__) diff --git a/old-tests/indexing/_test_indexing_sql.py b/old-tests/indexing/_test_indexing_sql.py deleted file mode 100644 index 8376a9c6..00000000 --- a/old-tests/indexing/_test_indexing_sql.py +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/env python3 - -# (C) Copyright 2020 ECMWF. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. 
-# - - -import sys - -import pytest - -from climetlab.core.temporary import temp_file -from climetlab.indexing.database.json import JsonFileDatabase -from climetlab.indexing.database.sql import SqlDatabase - - -@pytest.mark.skipif( - sys.platform == "win32", - reason="file:// not working on Windows yet", -) -@pytest.mark.parametrize("cls", (SqlDatabase, JsonFileDatabase)) -def test_load(cls): - lst = [ - { - "_path": "data/01.grb", - "param": "z", - "domain": "g", - "levtype": "pl", - "levelist": 500, - "date": 19970101, - "time": 0, - "step": "0", - "class": "ea", - "type": "an", - "stream": "oper", - "expver": "0001", - "_offset": 0, - "_length": 23358, - }, - { - "_path": "data/01.grb", - "param": "z", - "domain": "g", - "levtype": "pl", - "levelist": 500, - "date": 19970101, - "time": 100, - "step": "0", - "class": "ea", - "type": "an", - "stream": "oper", - "expver": "0001", - "_offset": 23358, - "_length": 23358, - }, - { - "_path": "data/01.grb", - "param": "z", - "domain": "g", - "levtype": "pl", - "levelist": 500, - "date": 19970101, - "time": 200, - "step": "0", - "class": "ea", - "type": "an", - "stream": "oper", - "expver": "0001", - "_offset": 46716, - "_length": 23358, - }, - { - "_path": "data/01.grb", - "param": "z", - "domain": "g", - "levtype": "pl", - "levelist": 500, - "date": 19970101, - "time": 300, - "step": "0", - "class": "ea", - "type": "an", - "stream": "oper", - "expver": "0001", - "_offset": 70074, - "_length": 23358, - }, - { - "_path": "data/02.grb", - "param": "r", - "domain": "g", - "levtype": "pl", - "levelist": 850, - "date": 19970228, - "time": 2300, - "step": "0", - "class": "ea", - "type": "an", - "stream": "oper", - "expver": "0001", - "_offset": 94156098, - "_length": 23358, - }, - ] - # TmpDirectory() - with temp_file(extension=".db") as db_path: - db = cls(db_path) - db.load_iterator(lst) - for i, dic in enumerate(db.lookup_dicts()): - assert len(dic) == len(lst[i]) - for k, v in dic.items(): - assert lst[i][k] == v - - for i, part in enumerate(db.lookup_parts()): - assert part.path.endswith(lst[i]["_path"]) - assert part.length == lst[i]["_length"] - assert part.offset == lst[i]["_offset"] - - -if __name__ == "__main__": - from climetlab.testing import main - - main(__file__) - # test_load(SqlDatabase) diff --git a/old-tests/indexing/_test_indexing_to_xxx.py b/old-tests/indexing/_test_indexing_to_xxx.py deleted file mode 100644 index 9191b4bf..00000000 --- a/old-tests/indexing/_test_indexing_to_xxx.py +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env python3 - -# (C) Copyright 2020 ECMWF. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. 
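The database test deleted above drives SqlDatabase and JsonFileDatabase through one small interface: load entries, then look them up as dicts or parts. A condensed sketch with a single entry copied from that test:

from climetlab.core.temporary import temp_file
from climetlab.indexing.database.sql import SqlDatabase

entry = {
    "_path": "data/01.grb", "param": "z", "domain": "g", "levtype": "pl",
    "levelist": 500, "date": 19970101, "time": 0, "step": "0",
    "class": "ea", "type": "an", "stream": "oper", "expver": "0001",
    "_offset": 0, "_length": 23358,
}

with temp_file(extension=".db") as path:
    db = SqlDatabase(path)
    db.load_iterator([entry])            # index the entry
    (part,) = db.lookup_parts()          # each part carries path, offset and length
    assert part.length == entry["_length"]
    assert part.offset == entry["_offset"]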
-# - -import os -import sys - -import numpy as np -import pytest - -here = os.path.dirname(__file__) -sys.path.insert(0, here) -from indexing_fixtures import get_fixtures # noqa: E402 - - -@pytest.mark.parametrize("params", (["t", "u"], ["u", "t"])) -@pytest.mark.parametrize("levels", ([500, 850], [850, 500])) -@pytest.mark.parametrize( - "source_name", - [ - "indexed-directory", - # "list-of-dicts", - # "file", - ], -) -def test_indexing_to_xarray(params, levels, source_name): - request = dict(level=levels, variable=params, date=20220929, time="1200") - - ds, __tmp, total, n = get_fixtures(source_name, {}) - - ds = ds.sel(**request) - ds = ds.order_by(level=levels, variable=params) - assert len(ds) == n, len(ds) - - ds.to_xarray() - - -@pytest.mark.parametrize("params", (["u", "t"],)) -@pytest.mark.parametrize("levels", ([1000, 850],)) -@pytest.mark.parametrize( - "source_name", - [ - "indexed-directory", - # "list-of-dicts", - # "file", - ], -) -def test_indexing_to_numpy(params, levels, source_name): - request = dict(level=levels, variable=params, date=20220929, time="1200") - - ds, __tmp, total, n = get_fixtures(source_name, {}) - ds = ds.sel(**request) - ds = ds.order_by(level=levels, variable=params) - assert len(ds) == n, len(ds) - - print(ds[0].to_numpy().mean()) - print(ds[1].to_numpy().mean()) - print(ds[2].to_numpy().mean()) - print(ds[3].to_numpy().mean()) - - assert np.abs(ds[0].to_numpy().mean() - 0.5083630135046184) < 10e-6 - assert np.abs(ds[1].to_numpy().mean() - 281.73044231454605) < 10e-6 - assert np.abs(ds[2].to_numpy().mean() - 1.9032698938640222) < 10e-6 - assert np.abs(ds[3].to_numpy().mean() - 274.671260493251) < 10e-6 - - -if __name__ == "__main__": - from climetlab.testing import main - - main(__file__) diff --git a/old-tests/indexing/index.jsonl b/old-tests/indexing/index.jsonl deleted file mode 100644 index 47c5f1bb..00000000 --- a/old-tests/indexing/index.jsonl +++ /dev/null @@ -1,5 +0,0 @@ -{"_path": "data/01.grb", "domain": "g", "levtype": "pl", "levelist": "500", "date": "19970101", "time": "0000", "step": "0", "param": "129.128", "class": "ea", "type": "an", "stream": "oper", "expver": "0001", "_offset": 0, "_length": 23358} -{"_path": "data/01.grb", "domain": "g", "levtype": "pl", "levelist": "500", "date": "19970101", "time": "0100", "step": "0", "param": "129.128", "class": "ea", "type": "an", "stream": "oper", "expver": "0001", "_offset": 23358, "_length": 23358} -{"_path": "data/01.grb", "domain": "g", "levtype": "pl", "levelist": "500", "date": "19970101", "time": "0200", "step": "0", "param": "129.128", "class": "ea", "type": "an", "stream": "oper", "expver": "0001", "_offset": 46716, "_length": 23358} -{"_path": "data/01.grb", "domain": "g", "levtype": "pl", "levelist": "500", "date": "19970101", "time": "0300", "step": "0", "param": "129.128", "class": "ea", "type": "an", "stream": "oper", "expver": "0001", "_offset": 70074, "_length": 23358} -{"_path": "data/02.grb", "domain": "g", "levtype": "pl", "levelist": "850", "date": "19970228", "time": "2300", "step": "0", "param": "157.128", "class": "ea", "type": "an", "stream": "oper", "expver": "0001", "_offset": 94156098, "_length": 23358} \ No newline at end of file diff --git a/old-tests/indexing/indexing_fixtures.py b/old-tests/indexing/indexing_fixtures.py deleted file mode 100644 index 35804ddd..00000000 --- a/old-tests/indexing/indexing_fixtures.py +++ /dev/null @@ -1,177 +0,0 @@ -#!/usr/bin/env python3 - -# (C) Copyright 2020 ECMWF. 
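The conversion tests being dropped exercised the same selection pipeline followed by to_xarray and to_numpy. A sketch of that flow, assuming an indexed directory of GRIB files as in the old fixtures (the path is a placeholder):

import climetlab as cml

ds = cml.load_source("indexed-directory", "/path/to/grib/dir")  # placeholder directory
ds = ds.sel(variable=["t", "u"], level=[500, 850], date=20220929, time="1200")
ds = ds.order_by(level=[500, 850], variable=["t", "u"])

xds = ds.to_xarray()        # whole selection as an xarray dataset
arr = ds[0].to_numpy()      # a single field as a numpy array
print(arr.mean())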
-# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. -# - -import os -import warnings - -import climetlab as cml -from climetlab.core.temporary import temp_directory, temp_file -from climetlab.readers.grib.index import GribFieldSet -from climetlab.testing import climetlab_file - -CML_BASEURL_S3 = "https://object-store.os-api.cci1.ecmwf.int/climetlab" -CML_BASEURL_CDS = "https://datastore.copernicus-climate.eu/climetlab" -CML_BASEURL_GET = "https://get.ecmwf.int/repository/test-data/climetlab" -CML_BASEURLS = [CML_BASEURL_S3, CML_BASEURL_GET, CML_BASEURL_CDS] - - -TEST_GRIB_FILES = [ - climetlab_file(p) - for p in [ - "docs/examples/test.grib", - "docs/examples/test4.grib", - ] -] - - -def dir_with_grib_files(): - tmp = temp_directory() - _build_dir_with_grib_files(tmp.path) - return tmp - - -def _build_dir_with_grib_files(testdir): - os.makedirs(testdir, exist_ok=True) - for p in ["t", "u", "v"]: - ds = cml.load_source("mars", param=p, date=20220929, grid="1/1") - ds.save(os.path.join(testdir, p + ".grib")) - - -def unique_grib_file(): - tmp = temp_file() - _build_unique_grib_file(tmp.path) - return tmp - - -def _build_unique_grib_file(path): - ds = cml.load_source("mars", param=["t", "u", "v"], date=20220929, grid="1/1") - ds.save(path) - - -def list_of_dicts(): - prototype = { - "gridType": "regular_ll", - "Nx": 2, - "Ny": 3, - "distinctLatitudes": [-10.0, 0.0, 10.0], - "distinctLongitudes": [0.0, 10.0], - "_param_id": 167, - "values": [[1, 2], [3, 4], [5, 6]], - "date": "20220929", - "time": "1200", - } - return [ - {"param": "t", "levelist": 500, **prototype}, - {"param": "t", "levelist": 850, **prototype}, - {"param": "u", "levelist": 500, **prototype}, - {"param": "u", "levelist": 850, **prototype}, - {"param": "d", "levelist": 850, **prototype}, - {"param": "d", "levelist": 600, **prototype}, - ] - - -class GribIndexFromDicts(GribFieldSet): - def __init__(self, list_of_dicts, *args, **kwargs): - self.list_of_dicts = list_of_dicts - super().__init__(*args, **kwargs) - - def __getitem__(self, n): - class VirtualGribField(dict): - def metadata(_self, n): - try: - if n == "level": - n = "levelist" - if n == "shortName": - n = "param" - if n == "paramId": - n = "_param_id" - return _self[n] - except KeyError: - warnings.warn("Cannot find all metadata keys.") - - @property - def values(self, n): - return self["values"] - - return VirtualGribField(self.list_of_dicts[n]) - - def __len__(self): - return len(self.list_of_dicts) - - -def get_fixtures_directory(request): - tmp = dir_with_grib_files() - total, n = 18, 4 - ds = cml.load_source("indexed-directory", tmp.path, **request) - return ds, tmp, total, n - - -def get_fixtures_file(request): - tmp = unique_grib_file() - total, n = 18, 4 - ds = cml.load_source("file", tmp.path, **request) - return ds, tmp, total, n - - -def get_fixtures_list_of_dicts(request): - tmp = list_of_dicts() - total, n = 6, 4 - ds = GribIndexFromDicts(tmp, **request) - ds = ds.mutate() - return ds, tmp, total, n - - -def get_fixtures_indexed_url(request): - baseurl = CML_BASEURL_CDS - ds = cml.load_source( - "indexed-url", - f"{baseurl}/test-data/input/indexed-urls/large_grib_1.grb", - **request, - ) - return ds, None, 4464, 2 - - -def 
get_fixtures_indexed_urls(request): - baseurl = CML_BASEURL_CDS - request = dict(**request) - request["n"] = [1, 2] - request["baseurl"] = baseurl - ds = cml.load_source( - "indexed-urls", - "{baseurl}/test-data/input/indexed-urls/large_grib_{n}.grb", - request, - ) - return ds, None, 8496, 2 - - # mean = float(s.to_xarray()["r"].mean()) - # assert abs(mean - 70.34426879882812) < 0.0000001, mean - - -def get_fixtures(source_name, *args, **kwargs): - return { - "indexed-directory": get_fixtures_directory, - "file": get_fixtures_file, - "list-of-dicts": get_fixtures_list_of_dicts, - "indexed-url": get_fixtures_indexed_url, - "indexed-urls": get_fixtures_indexed_urls, - }[source_name](*args, **kwargs) - - -def check_sel_and_order(ds, params, levels): - assert ds[0].metadata("param") == params[0] - assert ds[1].metadata("param") == params[1], (ds[1].metadata("param"), params[1]) - assert ds[2].metadata("param") == params[0] - assert ds[3].metadata("param") == params[1] - - assert ds[0].metadata("level") == levels[0] - assert ds[1].metadata("level") == levels[0] - assert ds[2].metadata("level") == levels[1] - assert ds[3].metadata("level") == levels[1] diff --git a/old-tests/indexing/test_indexing_cli.py b/old-tests/indexing/test_indexing_cli.py deleted file mode 100644 index d939d67c..00000000 --- a/old-tests/indexing/test_indexing_cli.py +++ /dev/null @@ -1,84 +0,0 @@ -#!/usr/bin/env python3 - -# (C) Copyright 2020 ECMWF. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. 
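Among the fixture helpers deleted above, the URL-based ones are the least obvious: the request dict fills the placeholders of a URL pattern, and selection is applied afterwards. A sketch following get_fixtures_indexed_urls, with values mirroring the old tests:

import climetlab as cml

CML_BASEURL_CDS = "https://datastore.copernicus-climate.eu/climetlab"

request = {
    "n": [1, 2],                 # expands {n} in the pattern below
    "baseurl": CML_BASEURL_CDS,  # expands {baseurl}
}
ds = cml.load_source(
    "indexed-urls",
    "{baseurl}/test-data/input/indexed-urls/large_grib_{n}.grb",
    request,
)
print(len(ds))                   # the old fixture expected 8496 fields here
subset = ds.sel(variable=["t", "u"], level=[500, 850], date="19970101", time=[1100, 1200])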
-# - -import shutil -import sys - -import numpy as np -import pytest - -import climetlab as cml -from climetlab.core.temporary import temp_directory -from climetlab.scripts.main import CliMetLabApp -from climetlab.testing import build_testdata - - -@pytest.mark.skipif(sys.platform == "win32", reason="Not supported on windows") -def test_indexing_cli_index_directory(): - dir = build_testdata() - print("Using data in ", dir) - - with temp_directory() as tmpdir: - shutil.copytree(dir, tmpdir, dirs_exist_ok=True) - - "lsm.grib", - "pl/climetlab.json", - "pl/u.grib", - "pl/v.grib", - "pl/z.grib", - "sfc/2t.grib", - "sfc/climetlab.json", - "sfc/tp.grib", - source1 = cml.load_source("file", tmpdir, filter="*.grib") - - app = CliMetLabApp() - cmd = f"index_directory {tmpdir}" - app.onecmd(cmd) - - source2 = cml.load_source("indexed-directory", tmpdir) - - source1 = source1.order_by("param", "time", "date") - source2 = source2.order_by("param", "time", "date") - - assert len(source1) == len(source2) - - for i in range(len(source1)): - f1 = source1[i] - f2 = source2[i] - assert str(f1) == str(f2), (f1, f2) - assert np.all(f1.to_numpy() == f2.to_numpy()) - - -# def _test_indexing_cli_export_cache(): -# with cd(build_testdata()) as dir: -# print("Using data in ", dir) -# with temp_directory() as cache_dir: -# with settings.temporary(): -# settings.set("cache-directory", cache_dir) - -# app = CliMetLabApp() -# app.onecmd(f"index_directory {dir}") -# # app.onecmd(f'export_cache --match "era5" {export_dir}') - -# exported_files = glob.glob(os.path.join(export_dir, "*")) -# assert len(exported_files) == 2, exported_files - -# target = f"{export_dir}/{os.path.basename(original)}" -# assert filecmp.cmp(original, target), (original, target) - -# check_len(source) - - -if __name__ == "__main__": - from climetlab.testing import main - - main(__file__) - # test_script_index_directory() diff --git a/old-tests/indexing/test_indexing_len.py b/old-tests/indexing/test_indexing_len.py deleted file mode 100644 index 2728402c..00000000 --- a/old-tests/indexing/test_indexing_len.py +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env python3 - -# (C) Copyright 2020 ECMWF. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. 
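The CLI test removed here checks that the index_directory command leaves the data unchanged: a plain file source and the resulting indexed-directory source must contain the same fields. The core of that check, sketched with a placeholder directory:

import climetlab as cml
from climetlab.scripts.main import CliMetLabApp

tmpdir = "/path/to/grib/tree"            # placeholder: a directory of GRIB files

app = CliMetLabApp()
app.onecmd(f"index_directory {tmpdir}")  # builds the climetlab index for the directory

plain = cml.load_source("file", tmpdir, filter="*.grib").order_by("param", "time", "date")
indexed = cml.load_source("indexed-directory", tmpdir).order_by("param", "time", "date")
assert len(plain) == len(indexed)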
-# - -import climetlab as cml -from climetlab.testing import TEST_DATA_URL_INPUT_GRIB, build_testdata, cd - - -def check_len(source): - assert len(source) == 337 - - -def test_indexing_len_for_source_file(): - with cd(build_testdata()) as dir: - print("Using data in ", dir) - - source = cml.load_source("file", "all.grib") - check_len(source) - - -def test_indexing_len_for_source_url(): - source = cml.load_source("url", f"{TEST_DATA_URL_INPUT_GRIB}/all.grib") - check_len(source) - - -def test_indexing_len_for_source_multi(): - with cd(build_testdata()) as dir: - print("Using data in ", dir) - - pl = cml.load_source("file", "pl", filter="*.grib") - sfc = cml.load_source("file", "sfc", filter="*.grib") - lsm = cml.load_source("file", "lsm.grib") - source = cml.load_source("multi", [pl, sfc, lsm]) - - check_len(source) - - -if __name__ == "__main__": - # from climetlab.testing import main - - # main(__file__) - test_indexing_len_for_source_url() - test_indexing_len_for_source_file() diff --git a/pyproject.toml b/pyproject.toml index 120eecbf..364c4cb8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,10 +1,102 @@ -[tool.black] -extend-exclude = ''' -( -experiments/* -) -''' - -[tool.isort] -extend_skip = ["grib.py", "experimentsy"] -profile = "black" \ No newline at end of file +#!/usr/bin/env python +# (C) Copyright 2024 ECMWF. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. + +# https://packaging.python.org/en/latest/guides/writing-pyproject-toml/ + +[build-system] +requires = ["setuptools>=60", "setuptools-scm>=8.0"] + +[project] +description = "Handling of climate/meteorological data."
+name = "climetlab" + +dynamic = ["version"] +license = { file = "LICENSE" } +requires-python = ">=3.9" + +authors = [ + { name = "European Centre for Medium-Range Weather Forecasts (ECMWF)", email = "software.support@ecmwf.int" }, +] + +keywords = ["tools", "datasets", "ai"] + +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: Implementation :: CPython", + "Programming Language :: Python :: Implementation :: PyPy", + "Operating System :: OS Independent", +] + +dependencies = [ + "requests", + "branca", + "cdsapi", + "cfgrib>=0.9.10.1", + "dask", + "earthkit-meteo", + "eccodes>=1.3.0", + "ecmwf-api-client>=1.6.1", + "ecmwf-opendata>=0.1.2", + "ecmwflibs>=0.6.3", + "entrypoints", + "filelock", + "imageio", + "lru-dict", + "magics>=1.5.6", + "markdown", + "multiurl>=0.1.0", + "netcdf4", + "numpngw", + "pdbufr", + "pyodc", + "pyyaml", + "termcolor", + "toolz", + "tqdm", + "xarray", +] + +[project.optional-dependencies] +"tensorflow" = ["tensorflow"] +"zarr" = ["zarr", "s3fs"] +"interactive" = ["skinnywms", "folium>=0.12.1"] + +[project.urls] +Homepage = "https://github.com/ecmwf/climetlab/" +Documentation = "https://climetlab.readthedocs.io/" +Repository = "https://github.com/ecmwf/climetlab/" +Issues = "https://github.com/ecmwf/climetlab/issues" +# Changelog = "https://github.com/ecmwf/climetlab/CHANGELOG.md" + +[project.scripts] +climetlab = "climetlab.scripts:main" + +[tool.setuptools_scm] +version_file = "src/climetlab/_version.py" + +[tool.setuptools.package-data] + +"climetlab.config" = ["*.yaml"] +"climetlab.data.dask" = ["*.yaml"] +"climetlab.data.domains" = ["*.yaml"] +"climetlab.data.layers" = ["*.yaml"] +"climetlab.data.projections" = ["*.yaml"] +"climetlab.data.styles" = ["*.yaml"] +"climetlab.datasets" = ["*.yaml"] +"climetlab.datasets.meteonet_samples" = ["*.yaml"] +"climetlab.datasets.meteonet_samples.styles" = ["*.yaml"] +"climetlab.plotting.backends.magics" = ["*.yaml"] +"climetlab.data.css" = ["*.css"] +"climetlab.sources" = ["*.grib"] diff --git a/setup.py b/setup.py deleted file mode 100644 index 05f2939e..00000000 --- a/setup.py +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/env python3 - -# (C) Copyright 2020 ECMWF. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. -# - - -import io -import os.path -import sys - -import setuptools - - -def read(fname): - file_path = os.path.join(os.path.dirname(__file__), fname) - return io.open(file_path, encoding="utf-8").read() - - -version = None -lines = read("climetlab/version").split("\n") -if lines: - version = lines[0] - - -assert version - -numpy = "numpy" -pandas = "pandas" -dask = "dask" - - -if sys.version_info < (3, 8): - if not os.environ.get("CLIMETLAB_RUN_UNSUPPORTED_PYTHON_VERSION"): - raise Exception( - "Python version " - + str(sys.version_info) - + " is not supported. Python 3.8 is required." 
- ) - -install_requires = [] -if sys.version_info < (3, 7): - install_requires += [ - "numpy<1.20", - "pandas==1.1.5", - "dataclasses", # Needed by dask - "xarray", - ] -else: - install_requires += ["numpy", "pandas", "xarray>=0.19.0"] - -install_requires += [ - # need to install to avoid conflict between aiohttp (dependency of s3fs) and requests (cdsapi) - # "chardet>=3.0,<4.0", - # "aiohttp>=3.7.2", - # -- - "requests", - # "zarr", - # "s3fs", - "dask", - "netcdf4", - "cfgrib>=0.9.10.1", - "cdsapi", - "ecmwf-api-client>=1.6.1", - "earthkit-meteo", - "multiurl>=0.1.0", - "ecmwf-opendata>=0.1.2", - "tqdm", - "eccodes>=1.3.0", - "magics>=1.5.6", - "pdbufr", - "pyodc", - "toolz", - "filelock", - "pyyaml", - "markdown", - "termcolor", - "entrypoints", - "branca", - "imageio", - "numpngw", - "lru-dict", -] -if not os.environ.get("CLIMETLAB_DO_NOT_INSTALL_ECMWFLIBS"): - install_requires.append("ecmwflibs") - -extras_require = { - "interactive": [ - "skinnywms", - "folium>=0.12.1", - ], - "tensorflow": [ - "tensorflow", - ], - "zarr": [ - "zarr", - "s3fs", - ], -} - - -full = [] -for k, v in extras_require.items(): - full += v -full += install_requires - -extras_require["full"] = full - - -setuptools.setup( - name="climetlab", - version=version, - author="ECMWF", - author_email="software.support@ecmwf.int", - license="Apache 2.0", - url="https://github.com/ecmwf/climetlab", - description="Handling of climate/meteorological data", - long_description=read("README.md"), - long_description_content_type="text/markdown", - packages=setuptools.find_packages(), - include_package_data=True, - install_requires=install_requires, - extras_require=extras_require, - zip_safe=True, - classifiers=[ - "Development Status :: 3 - Alpha", - "Intended Audience :: Developers", - "Programming Language :: Python", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: Implementation :: CPython", - "Programming Language :: Python :: Implementation :: PyPy", - "Operating System :: OS Independent", - ], - tests_require=[ - "pytest", - "nbconvert", - "jupyter", - "pytest-cov", - "climetlab-demo-dataset", - "climetlab-demo-source", - ], - test_suite="tests", - entry_points={"console_scripts": ["climetlab=climetlab.scripts:main"]}, -) diff --git a/climetlab/__init__.py b/src/climetlab/__init__.py similarity index 87% rename from climetlab/__init__.py rename to src/climetlab/__init__.py index 03c59fa6..b39c5008 100644 --- a/climetlab/__init__.py +++ b/src/climetlab/__init__.py @@ -7,6 +7,7 @@ # nor does it submit to any jurisdiction. # +from ._version import __version__ from .aaa import loaded_modules from .arguments.transformers import ALL from .core.caching import CACHE as cache @@ -17,15 +18,17 @@ from .datasets import get_dataset as dataset # so the user can do: cml.dataset(...) from .datasets import load_dataset from .distributed.dask import start_dask -from .plotting import interactive_map, new_plot, new_table +from .plotting import interactive_map +from .plotting import new_plot +from .plotting import new_table from .plotting import options as plotting_options from .plotting import plot_map from .readers import Reader from .readers.grib.output import new_grib_output from .sources import Source from .sources import get_source as source # so the user can do: cml.source(...) 
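With setup.py removed, the version string no longer comes from a climetlab/version file: setuptools-scm writes src/climetlab/_version.py at build time and this __init__.py hunk re-exports it. A quick check that an installed copy picked this up:

import climetlab as cml

print(cml.__version__)   # read from the generated climetlab._version module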
-from .sources import load_source, load_source_lazily -from .version import __version__ +from .sources import load_source +from .sources import load_source_lazily from .wrappers import Wrapper __all__ = [ diff --git a/climetlab/__main__.py b/src/climetlab/__main__.py similarity index 100% rename from climetlab/__main__.py rename to src/climetlab/__main__.py diff --git a/climetlab/aaa.py b/src/climetlab/aaa.py similarity index 100% rename from climetlab/aaa.py rename to src/climetlab/aaa.py diff --git a/climetlab/arguments/__init__.py b/src/climetlab/arguments/__init__.py similarity index 100% rename from climetlab/arguments/__init__.py rename to src/climetlab/arguments/__init__.py diff --git a/climetlab/arguments/args_kwargs.py b/src/climetlab/arguments/args_kwargs.py similarity index 100% rename from climetlab/arguments/args_kwargs.py rename to src/climetlab/arguments/args_kwargs.py diff --git a/climetlab/arguments/argument.py b/src/climetlab/arguments/argument.py similarity index 91% rename from climetlab/arguments/argument.py rename to src/climetlab/arguments/argument.py index 3dc84a6f..7e8fdf1f 100644 --- a/climetlab/arguments/argument.py +++ b/src/climetlab/arguments/argument.py @@ -9,11 +9,9 @@ import logging from climetlab.arguments.climetlab_types import infer_type -from climetlab.arguments.transformers import ( - AliasTransformer, - FormatTransformer, - TypeTransformer, -) +from climetlab.arguments.transformers import AliasTransformer +from climetlab.arguments.transformers import FormatTransformer +from climetlab.arguments.transformers import TypeTransformer LOG = logging.getLogger(__name__) diff --git a/climetlab/arguments/climetlab_types.py b/src/climetlab/arguments/climetlab_types.py similarity index 94% rename from climetlab/arguments/climetlab_types.py rename to src/climetlab/arguments/climetlab_types.py index e93bf1cb..c995b554 100644 --- a/climetlab/arguments/climetlab_types.py +++ b/src/climetlab/arguments/climetlab_types.py @@ -34,9 +34,7 @@ class NonListMixin: def cast(self, value): if isinstance(value, (tuple, list)): if len(value) != 1: - raise TypeError( - f"Expected non-list but was {type(value).__name__}: {value}" - ) + raise TypeError(f"Expected non-list but was {type(value).__name__}: {value}") value = value[0] return self._cast(value) @@ -96,9 +94,7 @@ def same(a, b): if same(value, v): return v - raise ValueError( - f"Invalid value '{value}', possible values are {self.values} ({self.__class__.__name__})" - ) + raise ValueError(f"Invalid value '{value}', possible values are {self.values} ({self.__class__.__name__})") def update(self, availability): # TODO: if value is none : use availability w @@ -175,9 +171,7 @@ def cast(self, value): value = list(range(int(bits[0]), int(bits[2]) + 1, 1)) elif len(bits) == 5 and bits[1].lower() == "to" and bits[3].lower() == "by": - value = list( - range(int(bits[0]), int(bits[2]) + int(bits[4]), int(bits[4])) - ) + value = list(range(int(bits[0]), int(bits[2]) + int(bits[4]), int(bits[4]))) return super().cast(value) @@ -377,9 +371,7 @@ def _infer_type(**kwargs): # normalize("name", ["a", "b", "c"]) and similar if isinstance(values, (list, tuple)): # and type is None: if type not in (None, "enum", "enum-list"): - LOG.warning( - f"Type ignored with enums, values={values}, type={type} and multiple={multiple}" - ) + LOG.warning(f"Type ignored with enums, values={values}, type={type} and multiple={multiple}") if multiple is None and type == "enum-list": multiple = True @@ -421,6 +413,4 @@ def _infer_type(**kwargs): if multiple is 
None: return AnySingleOrListType() - raise ValueError( - f"Cannot infer type from values={values}, type={type} and multiple={multiple}" - ) + raise ValueError(f"Cannot infer type from values={values}, type={type} and multiple={multiple}") diff --git a/climetlab/arguments/guess.py b/src/climetlab/arguments/guess.py similarity index 100% rename from climetlab/arguments/guess.py rename to src/climetlab/arguments/guess.py diff --git a/climetlab/arguments/input_manager.py b/src/climetlab/arguments/input_manager.py similarity index 96% rename from climetlab/arguments/input_manager.py rename to src/climetlab/arguments/input_manager.py index db31f35c..dc86a8b7 100644 --- a/climetlab/arguments/input_manager.py +++ b/src/climetlab/arguments/input_manager.py @@ -9,7 +9,8 @@ import logging from .argument import Argument -from .transformers import AvailabilityChecker, KwargsAliasTransformer +from .transformers import AvailabilityChecker +from .transformers import KwargsAliasTransformer LOG = logging.getLogger(__name__) @@ -79,9 +80,7 @@ def build_pipeline(self): a.add_format_transformers(self._pipeline) def apply_to_kwargs_before_default(self, kwargs): - LOG.debug( - f"Apply pipeline to kwargs before resolving default values: {safe_to_str(kwargs)}" - ) + LOG.debug(f"Apply pipeline to kwargs before resolving default values: {safe_to_str(kwargs)}") for t in self.pipeline: if hasattr(t, "name"): LOG.debug(f" - {t.name}: apply {t}.") diff --git a/climetlab/arguments/transformers.py b/src/climetlab/arguments/transformers.py similarity index 97% rename from climetlab/arguments/transformers.py rename to src/climetlab/arguments/transformers.py index 7102e1bd..f59c0f57 100644 --- a/climetlab/arguments/transformers.py +++ b/src/climetlab/arguments/transformers.py @@ -182,8 +182,7 @@ def reversed_aliases(self): for target, aliases in self.aliases.items(): for alias in aliases: assert alias not in reversed, ( - "Error: Multiple target value for alias " - f" argument '{alias}': '{target}' and '{reversed[alias]}'" + "Error: Multiple target value for alias " f" argument '{alias}': '{target}' and '{reversed[alias]}'" ) reversed[alias] = target return reversed diff --git a/climetlab/config/units.yaml b/src/climetlab/config/units.yaml similarity index 100% rename from climetlab/config/units.yaml rename to src/climetlab/config/units.yaml diff --git a/climetlab/core/__init__.py b/src/climetlab/core/__init__.py similarity index 100% rename from climetlab/core/__init__.py rename to src/climetlab/core/__init__.py diff --git a/climetlab/core/caching.py b/src/climetlab/core/caching.py similarity index 94% rename from climetlab/core/caching.py rename to src/climetlab/core/caching.py index c053962c..a63be108 100644 --- a/climetlab/core/caching.py +++ b/src/climetlab/core/caching.py @@ -71,9 +71,7 @@ def __init__(self, path): self.total = st.f_blocks * st.f_frsize self.avail = st.f_bavail * st.f_frsize - self.percent = int( - float(self.total - self.avail) / float(self.total) * 100 + 0.5 - ) + self.percent = int(float(self.total - self.avail) / float(self.total) * 100 + 0.5) def __repr__(self): return ( @@ -227,13 +225,9 @@ def _latest_date(self): """Returns the latest date to be used when purging the cache. 
So we do not purge files being downloaded.""" with self.connection as db: - latest = db.execute( - "SELECT MIN(creation_date) FROM cache WHERE size IS NULL" - ).fetchone()[0] + latest = db.execute("SELECT MIN(creation_date) FROM cache WHERE size IS NULL").fetchone()[0] if latest is None: - latest = db.execute( - "SELECT MAX(creation_date) FROM cache WHERE size IS NOT NULL" - ).fetchone()[0] + latest = db.execute("SELECT MAX(creation_date) FROM cache WHERE size IS NOT NULL").fetchone()[0] if latest is None: latest = datetime.datetime.now() if isinstance(latest, str): @@ -326,9 +320,7 @@ def _housekeeping(self, clean=False): continue full = os.path.join(top, name) - count = db.execute( - "SELECT count(*) FROM cache WHERE path=?", (full,) - ).fetchone()[0] + count = db.execute("SELECT count(*) FROM cache WHERE path=?", (full,)).fetchone()[0] if count > 0: continue @@ -353,9 +345,7 @@ def _housekeeping(self, clean=False): if parent is None: LOG.warning(f"CliMetLab cache: orphan found: {full}") else: - LOG.debug( - f"CliMetLab cache: orphan found: {full} with parent {parent}" - ) + LOG.debug(f"CliMetLab cache: orphan found: {full} with parent {parent}") self._register_cache_file( full, @@ -416,9 +406,7 @@ def _delete_entry(self, entry): LOG.warning( "Deleting entry %s", - json.dumps( - self._entry_to_dict(entry), indent=4, default=default_serialiser - ), + json.dumps(self._entry_to_dict(entry), indent=4, default=default_serialiser), ) total = 0 @@ -464,9 +452,7 @@ def _decache(self, bytes, purge=False): latest = datetime.datetime.now() if purge else self._latest_date() age = datetime.datetime.now() - latest age = age.days * 24 * 3600 + age.seconds - LOG.warning( - f"Decaching files oldest than {latest.isoformat()} (age: {humanize.seconds(age)})" - ) + LOG.warning(f"Decaching files oldest than {latest.isoformat()} (age: {humanize.seconds(age)})") for stmt in ( "SELECT * FROM cache WHERE size IS NOT NULL AND owner='orphans' AND creation_date < ?", @@ -534,9 +520,7 @@ def _register_cache_file(self, path, owner, args, parent=None): (path, owner, args, now, now, 1, parent), ) - return dict( - db.execute("SELECT * FROM cache WHERE path=?", (path,)).fetchone() - ) + return dict(db.execute("SELECT * FROM cache WHERE path=?", (path,)).fetchone()) def _cache_size(self): with self.connection as db: @@ -663,9 +647,7 @@ def cache_file( m = hashlib.sha256() m.update(owner.encode("utf-8")) - m.update( - json.dumps(args, sort_keys=True, default=default_serialiser).encode("utf-8") - ) + m.update(json.dumps(args, sort_keys=True, default=default_serialiser).encode("utf-8")) m.update(json.dumps(hash_extra, sort_keys=True).encode("utf-8")) m.update(json.dumps(extension, sort_keys=True).encode("utf-8")) @@ -701,9 +683,7 @@ def cache_file( lock = path + ".lock" with FileLock(lock): - if not os.path.exists( - path - ): # Check again, another thread/process may have created the file + if not os.path.exists(path): # Check again, another thread/process may have created the file owner_data = create(path + ".tmp", args) os.rename(path + ".tmp", path) diff --git a/climetlab/core/constants.py b/src/climetlab/core/constants.py similarity index 100% rename from climetlab/core/constants.py rename to src/climetlab/core/constants.py diff --git a/climetlab/core/data.py b/src/climetlab/core/data.py similarity index 94% rename from climetlab/core/data.py rename to src/climetlab/core/data.py index 79f0c603..9038506f 100644 --- a/climetlab/core/data.py +++ b/src/climetlab/core/data.py @@ -82,8 +82,7 @@ def _repr_html_(self): ] 
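The caching hunks above keep the same concurrency pattern throughout: the cache file is created under a sibling .lock file, written to a .tmp path, then renamed into place, with a re-check after the lock is acquired. A standalone sketch of that idea (not climetlab's internal API), using the filelock package already listed in the project dependencies:

import os
from filelock import FileLock

def create_once(path, create):
    """Create `path` atomically: build it as .tmp under a lock, then rename."""
    with FileLock(path + ".lock"):
        if not os.path.exists(path):      # re-check: another process may have created it
            create(path + ".tmp")
            os.rename(path + ".tmp", path)
    return path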
html.append( - "Definition: %s " - % (yaml.dump(self.data, default_flow_style=False),) + "Definition: %s
" % (yaml.dump(self.data, default_flow_style=False),) ) html.append("") return "".join(html) @@ -170,10 +169,7 @@ def get_data_entry(kind, name, default=None, merge=False): if name not in files[kind]: if default is not None: return default - raise KeyError( - "No object '%s' in collection named '%s' (%s)" - % (name, kind, sorted(files[kind].keys())) - ) + raise KeyError("No object '%s' in collection named '%s' (%s)" % (name, kind, sorted(files[kind].keys()))) choices = files[kind][name].choices() assert len(choices) != 0 @@ -205,12 +201,7 @@ def is_active(owner, entry): ) choices = {k: v for k, v in choices.items() if is_active(k, v)} - selected = [ - v - for _, v in sorted( - choices.items(), key=lambda x: PRIORITIES.get(x[0], PRIORITIES["plugins"]) - ) - ] + selected = [v for _, v in sorted(choices.items(), key=lambda x: PRIORITIES.get(x[0], PRIORITIES["plugins"]))] if not merge: return selected[0] diff --git a/climetlab/core/index.py b/src/climetlab/core/index.py similarity index 96% rename from climetlab/core/index.py rename to src/climetlab/core/index.py index 6abf311e..1047fa64 100644 --- a/climetlab/core/index.py +++ b/src/climetlab/core/index.py @@ -15,7 +15,8 @@ from collections import defaultdict import climetlab as cml -from climetlab.core.order import build_remapping, normalize_order_by +from climetlab.core.order import build_remapping +from climetlab.core.order import normalize_order_by from climetlab.core.select import normalize_selection from climetlab.sources import Source @@ -137,9 +138,7 @@ def get(self, x): actions[k] = v continue - assert isinstance( - v, (list, tuple) - ), f"Invalid argument for {k}: {v} ({type(v)})" + assert isinstance(v, (list, tuple)), f"Invalid argument for {k}: {v} ({type(v)})" order = {} for i, key in enumerate(v): @@ -184,9 +183,7 @@ def sel(self, *args, remapping=None, **kwargs): selection = Selection(kwargs, remapping=remapping) - indices = ( - i for i, element in enumerate(self) if selection.match_element(element) - ) + indices = (i for i, element in enumerate(self) if selection.match_element(element)) return self.new_mask_index(self, indices) diff --git a/climetlab/core/initialise.py b/src/climetlab/core/initialise.py similarity index 100% rename from climetlab/core/initialise.py rename to src/climetlab/core/initialise.py diff --git a/climetlab/core/ipython.py b/src/climetlab/core/ipython.py similarity index 93% rename from climetlab/core/ipython.py rename to src/climetlab/core/ipython.py index 4c734a22..57b64ac2 100644 --- a/climetlab/core/ipython.py +++ b/src/climetlab/core/ipython.py @@ -51,9 +51,7 @@ def guess_which_ipython(): if ipython_active.__class__.__name__ == "ZMQInteractiveShell": return ("jupyter-lab", None) - if isinstance( - ipython_active.python_dir, str - ) and ipython_active.python_dir.endswith(".ipython"): + if isinstance(ipython_active.python_dir, str) and ipython_active.python_dir.endswith(".ipython"): return ("jupyter-lab", None) return ("unknown", None) @@ -117,7 +115,9 @@ def _identity(x, **kwargs): if ipython_active: from IPython.display import HTML from IPython.display import SVG as SVG_ipython - from IPython.display import Image, Markdown, display + from IPython.display import Image + from IPython.display import Markdown + from IPython.display import display def SVG(*args, **kwargs): import IPython diff --git a/climetlab/core/metadata.py b/src/climetlab/core/metadata.py similarity index 100% rename from climetlab/core/metadata.py rename to src/climetlab/core/metadata.py diff --git a/climetlab/core/order.py 
b/src/climetlab/core/order.py similarity index 95% rename from climetlab/core/order.py rename to src/climetlab/core/order.py index 17e8906b..53ab9763 100644 --- a/climetlab/core/order.py +++ b/src/climetlab/core/order.py @@ -144,12 +144,7 @@ def normalize_order_by(*args, **kwargs): _kwargs.update(kwargs) for k, v in _kwargs.items(): - if not ( - v is None - or callable(v) - or isinstance(v, (list, tuple, set)) - or v in ["ascending", "descending"] - ): + if not (v is None or callable(v) or isinstance(v, (list, tuple, set)) or v in ["ascending", "descending"]): raise ValueError(f"Unsupported order: {v} of type {type(v)} for key {k}") return _kwargs diff --git a/climetlab/core/plugins.py b/src/climetlab/core/plugins.py similarity index 99% rename from climetlab/core/plugins.py rename to src/climetlab/core/plugins.py index f27f13fd..f69e095e 100644 --- a/climetlab/core/plugins.py +++ b/src/climetlab/core/plugins.py @@ -18,7 +18,8 @@ import sys from collections import defaultdict from importlib import import_module -from typing import List, Union +from typing import List +from typing import Union import entrypoints diff --git a/climetlab/core/select.py b/src/climetlab/core/select.py similarity index 100% rename from climetlab/core/select.py rename to src/climetlab/core/select.py diff --git a/climetlab/core/settings.py b/src/climetlab/core/settings.py similarity index 98% rename from climetlab/core/settings.py rename to src/climetlab/core/settings.py index a37149ca..541576cb 100644 --- a/climetlab/core/settings.py +++ b/src/climetlab/core/settings.py @@ -17,9 +17,11 @@ import yaml +from climetlab._version import __version__ as VERSION from climetlab.utils.html import css -from climetlab.utils.humanize import as_bytes, as_percent, as_seconds -from climetlab.version import __version__ as VERSION +from climetlab.utils.humanize import as_bytes +from climetlab.utils.humanize import as_percent +from climetlab.utils.humanize import as_seconds LOG = logging.getLogger(__name__) diff --git a/climetlab/core/statistics.py b/src/climetlab/core/statistics.py similarity index 100% rename from climetlab/core/statistics.py rename to src/climetlab/core/statistics.py diff --git a/climetlab/core/temporary.py b/src/climetlab/core/temporary.py similarity index 100% rename from climetlab/core/temporary.py rename to src/climetlab/core/temporary.py diff --git a/climetlab/core/thread.py b/src/climetlab/core/thread.py similarity index 100% rename from climetlab/core/thread.py rename to src/climetlab/core/thread.py diff --git a/climetlab/dask/__init__.py b/src/climetlab/dask/__init__.py similarity index 100% rename from climetlab/dask/__init__.py rename to src/climetlab/dask/__init__.py diff --git a/climetlab/data/css/table.css b/src/climetlab/data/css/table.css similarity index 100% rename from climetlab/data/css/table.css rename to src/climetlab/data/css/table.css diff --git a/climetlab/data/dask/local.yaml b/src/climetlab/data/dask/local.yaml similarity index 68% rename from climetlab/data/dask/local.yaml rename to src/climetlab/data/dask/local.yaml index 95ec05fc..db022e69 100644 --- a/climetlab/data/dask/local.yaml +++ b/src/climetlab/data/dask/local.yaml @@ -1,4 +1,4 @@ --- dask: cluster_cls: local - scale: null \ No newline at end of file + scale: null diff --git a/climetlab/data/dask/slurm.yaml b/src/climetlab/data/dask/slurm.yaml similarity index 96% rename from climetlab/data/dask/slurm.yaml rename to src/climetlab/data/dask/slurm.yaml index 2378f6b5..508c75d4 100644 --- a/climetlab/data/dask/slurm.yaml +++ 
b/src/climetlab/data/dask/slurm.yaml @@ -12,4 +12,4 @@ dask: - "--ntasks=1" scheduler_options: dashboard_address: 12367 - scale: 8 \ No newline at end of file + scale: 8 diff --git a/climetlab/data/dask/ssh.yaml b/src/climetlab/data/dask/ssh.yaml similarity index 87% rename from climetlab/data/dask/ssh.yaml rename to src/climetlab/data/dask/ssh.yaml index eaf4b086..96ffa813 100644 --- a/climetlab/data/dask/ssh.yaml +++ b/src/climetlab/data/dask/ssh.yaml @@ -5,4 +5,4 @@ dask: hosts: - localhost - localhost - scale: null \ No newline at end of file + scale: null diff --git a/climetlab/data/domains/verification.yaml b/src/climetlab/data/domains/verification.yaml similarity index 99% rename from climetlab/data/domains/verification.yaml rename to src/climetlab/data/domains/verification.yaml index 9ac69433..71196168 100644 --- a/climetlab/data/domains/verification.yaml +++ b/src/climetlab/data/domains/verification.yaml @@ -5,7 +5,7 @@ # Using these local files instead of the shared ones requires proper setting # in the [vtb] section of ${VTB_HOME}/vtb/_config/locals.yaml # -doc: +doc: visualize: https://sites.ecmwf.int/mo5/verification/domains/ areas: diff --git a/climetlab/data/layers/default-background.yaml b/src/climetlab/data/layers/default-background.yaml similarity index 100% rename from climetlab/data/layers/default-background.yaml rename to src/climetlab/data/layers/default-background.yaml diff --git a/climetlab/data/layers/default-foreground.yaml b/src/climetlab/data/layers/default-foreground.yaml similarity index 100% rename from climetlab/data/layers/default-foreground.yaml rename to src/climetlab/data/layers/default-foreground.yaml diff --git a/climetlab/data/layers/example-foreground.yaml b/src/climetlab/data/layers/example-foreground.yaml similarity index 100% rename from climetlab/data/layers/example-foreground.yaml rename to src/climetlab/data/layers/example-foreground.yaml diff --git a/climetlab/data/layers/land-sea.yaml b/src/climetlab/data/layers/land-sea.yaml similarity index 98% rename from climetlab/data/layers/land-sea.yaml rename to src/climetlab/data/layers/land-sea.yaml index 4f4b70fa..61008822 100644 --- a/climetlab/data/layers/land-sea.yaml +++ b/src/climetlab/data/layers/land-sea.yaml @@ -12,4 +12,3 @@ magics: map_coastline_colour: tan map_grid_frame: off map_grid_frame_thickness: 5 - diff --git a/climetlab/data/projections/africa.yaml b/src/climetlab/data/projections/africa.yaml similarity index 100% rename from climetlab/data/projections/africa.yaml rename to src/climetlab/data/projections/africa.yaml diff --git a/climetlab/data/projections/asia.yaml b/src/climetlab/data/projections/asia.yaml similarity index 100% rename from climetlab/data/projections/asia.yaml rename to src/climetlab/data/projections/asia.yaml diff --git a/climetlab/data/projections/bonne.yaml b/src/climetlab/data/projections/bonne.yaml similarity index 100% rename from climetlab/data/projections/bonne.yaml rename to src/climetlab/data/projections/bonne.yaml diff --git a/climetlab/data/projections/collignon.yaml b/src/climetlab/data/projections/collignon.yaml similarity index 100% rename from climetlab/data/projections/collignon.yaml rename to src/climetlab/data/projections/collignon.yaml diff --git a/climetlab/data/projections/euro-atlantic.yaml b/src/climetlab/data/projections/euro-atlantic.yaml similarity index 100% rename from climetlab/data/projections/euro-atlantic.yaml rename to src/climetlab/data/projections/euro-atlantic.yaml diff --git 
a/climetlab/data/projections/europe-cylindrical.yaml b/src/climetlab/data/projections/europe-cylindrical.yaml similarity index 100% rename from climetlab/data/projections/europe-cylindrical.yaml rename to src/climetlab/data/projections/europe-cylindrical.yaml diff --git a/climetlab/data/projections/europe.yaml b/src/climetlab/data/projections/europe.yaml similarity index 100% rename from climetlab/data/projections/europe.yaml rename to src/climetlab/data/projections/europe.yaml diff --git a/climetlab/data/projections/global.yaml b/src/climetlab/data/projections/global.yaml similarity index 100% rename from climetlab/data/projections/global.yaml rename to src/climetlab/data/projections/global.yaml diff --git a/climetlab/data/projections/goode.yaml b/src/climetlab/data/projections/goode.yaml similarity index 100% rename from climetlab/data/projections/goode.yaml rename to src/climetlab/data/projections/goode.yaml diff --git a/climetlab/data/projections/mercator.yaml b/src/climetlab/data/projections/mercator.yaml similarity index 100% rename from climetlab/data/projections/mercator.yaml rename to src/climetlab/data/projections/mercator.yaml diff --git a/climetlab/data/projections/mollweide.yaml b/src/climetlab/data/projections/mollweide.yaml similarity index 100% rename from climetlab/data/projections/mollweide.yaml rename to src/climetlab/data/projections/mollweide.yaml diff --git a/climetlab/data/projections/north-america.yaml b/src/climetlab/data/projections/north-america.yaml similarity index 100% rename from climetlab/data/projections/north-america.yaml rename to src/climetlab/data/projections/north-america.yaml diff --git a/climetlab/data/projections/north-america1.yaml b/src/climetlab/data/projections/north-america1.yaml similarity index 100% rename from climetlab/data/projections/north-america1.yaml rename to src/climetlab/data/projections/north-america1.yaml diff --git a/climetlab/data/projections/north-atlantic.yaml b/src/climetlab/data/projections/north-atlantic.yaml similarity index 100% rename from climetlab/data/projections/north-atlantic.yaml rename to src/climetlab/data/projections/north-atlantic.yaml diff --git a/climetlab/data/projections/north-hemisphere.yaml b/src/climetlab/data/projections/north-hemisphere.yaml similarity index 100% rename from climetlab/data/projections/north-hemisphere.yaml rename to src/climetlab/data/projections/north-hemisphere.yaml diff --git a/climetlab/data/projections/polar-north.yaml b/src/climetlab/data/projections/polar-north.yaml similarity index 100% rename from climetlab/data/projections/polar-north.yaml rename to src/climetlab/data/projections/polar-north.yaml diff --git a/climetlab/data/projections/robinson.yaml b/src/climetlab/data/projections/robinson.yaml similarity index 100% rename from climetlab/data/projections/robinson.yaml rename to src/climetlab/data/projections/robinson.yaml diff --git a/climetlab/data/projections/south-america.yaml b/src/climetlab/data/projections/south-america.yaml similarity index 100% rename from climetlab/data/projections/south-america.yaml rename to src/climetlab/data/projections/south-america.yaml diff --git a/climetlab/data/projections/south-atlantic.yaml b/src/climetlab/data/projections/south-atlantic.yaml similarity index 100% rename from climetlab/data/projections/south-atlantic.yaml rename to src/climetlab/data/projections/south-atlantic.yaml diff --git a/climetlab/data/projections/south-hemisphere.yaml b/src/climetlab/data/projections/south-hemisphere.yaml similarity index 100% rename from 
climetlab/data/projections/south-hemisphere.yaml rename to src/climetlab/data/projections/south-hemisphere.yaml diff --git a/climetlab/data/projections/south-pacific.yaml b/src/climetlab/data/projections/south-pacific.yaml similarity index 100% rename from climetlab/data/projections/south-pacific.yaml rename to src/climetlab/data/projections/south-pacific.yaml diff --git a/climetlab/data/projections/tapestry.yaml b/src/climetlab/data/projections/tapestry.yaml similarity index 100% rename from climetlab/data/projections/tapestry.yaml rename to src/climetlab/data/projections/tapestry.yaml diff --git a/climetlab/data/projections/tropics-east.yaml b/src/climetlab/data/projections/tropics-east.yaml similarity index 100% rename from climetlab/data/projections/tropics-east.yaml rename to src/climetlab/data/projections/tropics-east.yaml diff --git a/climetlab/data/projections/tropics-west.yaml b/src/climetlab/data/projections/tropics-west.yaml similarity index 100% rename from climetlab/data/projections/tropics-west.yaml rename to src/climetlab/data/projections/tropics-west.yaml diff --git a/climetlab/data/projections/web-mercator.yaml b/src/climetlab/data/projections/web-mercator.yaml similarity index 100% rename from climetlab/data/projections/web-mercator.yaml rename to src/climetlab/data/projections/web-mercator.yaml diff --git a/climetlab/data/styles/cyclone-track.yaml b/src/climetlab/data/styles/cyclone-track.yaml similarity index 100% rename from climetlab/data/styles/cyclone-track.yaml rename to src/climetlab/data/styles/cyclone-track.yaml diff --git a/climetlab/data/styles/default-style-fields.yaml b/src/climetlab/data/styles/default-style-fields.yaml similarity index 100% rename from climetlab/data/styles/default-style-fields.yaml rename to src/climetlab/data/styles/default-style-fields.yaml diff --git a/climetlab/data/styles/default-style-observations.yaml b/src/climetlab/data/styles/default-style-observations.yaml similarity index 100% rename from climetlab/data/styles/default-style-observations.yaml rename to src/climetlab/data/styles/default-style-observations.yaml diff --git a/climetlab/data/styles/land-sea-mask.yaml b/src/climetlab/data/styles/land-sea-mask.yaml similarity index 100% rename from climetlab/data/styles/land-sea-mask.yaml rename to src/climetlab/data/styles/land-sea-mask.yaml diff --git a/climetlab/data/styles/no-style.yaml b/src/climetlab/data/styles/no-style.yaml similarity index 100% rename from climetlab/data/styles/no-style.yaml rename to src/climetlab/data/styles/no-style.yaml diff --git a/climetlab/data/styles/orography.yaml b/src/climetlab/data/styles/orography.yaml similarity index 100% rename from climetlab/data/styles/orography.yaml rename to src/climetlab/data/styles/orography.yaml diff --git a/climetlab/data/styles/rainbow-markers.yaml b/src/climetlab/data/styles/rainbow-markers.yaml similarity index 100% rename from climetlab/data/styles/rainbow-markers.yaml rename to src/climetlab/data/styles/rainbow-markers.yaml diff --git a/climetlab/data/styles/tapestry.yaml b/src/climetlab/data/styles/tapestry.yaml similarity index 100% rename from climetlab/data/styles/tapestry.yaml rename to src/climetlab/data/styles/tapestry.yaml diff --git a/climetlab/datasets/__init__.py b/src/climetlab/datasets/__init__.py similarity index 98% rename from climetlab/datasets/__init__.py rename to src/climetlab/datasets/__init__.py index 8dfa3632..8e49c55d 100644 --- a/climetlab/datasets/__init__.py +++ b/src/climetlab/datasets/__init__.py @@ -179,9 +179,7 @@ def __call__(self): 
def _dataset_from_dict(name, dataset, path=None): attributes = dataset.get("metadata", {}) - attributes.update( - dict(_path=path, _src=dataset["source"], _args=dataset.get("args", {})) - ) + attributes.update(dict(_path=path, _src=dataset["source"], _args=dataset.get("args", {}))) return type(camel(name), (YamlDefinedDataset,), attributes) diff --git a/climetlab/datasets/era5_precipitations.py b/src/climetlab/datasets/era5_precipitations.py similarity index 100% rename from climetlab/datasets/era5_precipitations.py rename to src/climetlab/datasets/era5_precipitations.py diff --git a/climetlab/datasets/era5_single_levels.py b/src/climetlab/datasets/era5_single_levels.py similarity index 93% rename from climetlab/datasets/era5_single_levels.py rename to src/climetlab/datasets/era5_single_levels.py index 6ecf932b..a0413ea9 100644 --- a/climetlab/datasets/era5_single_levels.py +++ b/src/climetlab/datasets/era5_single_levels.py @@ -46,9 +46,7 @@ def __init__(self, variable, period, domain=None, time=None, grid=None): sources = [] for year in range(period[0], period[1] + 1): request["year"] = year - sources.append( - load_source("cds", "reanalysis-era5-single-levels", **request) - ) + sources.append(load_source("cds", "reanalysis-era5-single-levels", **request)) self.source = load_source("multi", sources) diff --git a/climetlab/datasets/era5_temperature.py b/src/climetlab/datasets/era5_temperature.py similarity index 100% rename from climetlab/datasets/era5_temperature.py rename to src/climetlab/datasets/era5_temperature.py diff --git a/climetlab/datasets/example-dataset.yaml b/src/climetlab/datasets/example-dataset.yaml similarity index 100% rename from climetlab/datasets/example-dataset.yaml rename to src/climetlab/datasets/example-dataset.yaml diff --git a/climetlab/datasets/high_low.py b/src/climetlab/datasets/high_low.py similarity index 100% rename from climetlab/datasets/high_low.py rename to src/climetlab/datasets/high_low.py diff --git a/climetlab/datasets/hurricane_database.py b/src/climetlab/datasets/hurricane_database.py similarity index 100% rename from climetlab/datasets/hurricane_database.py rename to src/climetlab/datasets/hurricane_database.py diff --git a/climetlab/datasets/meteonet_samples/__init__.py b/src/climetlab/datasets/meteonet_samples/__init__.py similarity index 100% rename from climetlab/datasets/meteonet_samples/__init__.py rename to src/climetlab/datasets/meteonet_samples/__init__.py diff --git a/climetlab/datasets/meteonet_samples/dataset.yaml b/src/climetlab/datasets/meteonet_samples/dataset.yaml similarity index 100% rename from climetlab/datasets/meteonet_samples/dataset.yaml rename to src/climetlab/datasets/meteonet_samples/dataset.yaml diff --git a/climetlab/datasets/meteonet_samples/ground_stations.py b/src/climetlab/datasets/meteonet_samples/ground_stations.py similarity index 86% rename from climetlab/datasets/meteonet_samples/ground_stations.py rename to src/climetlab/datasets/meteonet_samples/ground_stations.py index 106cdec7..603659e1 100644 --- a/climetlab/datasets/meteonet_samples/ground_stations.py +++ b/src/climetlab/datasets/meteonet_samples/ground_stations.py @@ -20,14 +20,10 @@ class MeteonetGroundStations(Meteonet): """ def __init__(self, domain="NW", date="20160101"): - url = "{url}/ground_stations/{domain}_{date}.csv".format( - url=self.URL, domain=domain, date=date - ) + url = "{url}/ground_stations/{domain}_{date}.csv".format(url=self.URL, domain=domain, date=date) self.path = download_and_cache(url) - self._pandas = pd.read_csv( - 
self.path, parse_dates=[4], infer_datetime_format=True - ) + self._pandas = pd.read_csv(self.path, parse_dates=[4], infer_datetime_format=True) def to_pandas(self): return self._pandas diff --git a/climetlab/datasets/meteonet_samples/masks.py b/src/climetlab/datasets/meteonet_samples/masks.py similarity index 100% rename from climetlab/datasets/meteonet_samples/masks.py rename to src/climetlab/datasets/meteonet_samples/masks.py diff --git a/climetlab/datasets/meteonet_samples/radar.py b/src/climetlab/datasets/meteonet_samples/radar.py similarity index 95% rename from climetlab/datasets/meteonet_samples/radar.py rename to src/climetlab/datasets/meteonet_samples/radar.py index f1564083..1451e6d1 100644 --- a/climetlab/datasets/meteonet_samples/radar.py +++ b/src/climetlab/datasets/meteonet_samples/radar.py @@ -28,9 +28,7 @@ class MeteonetRadar(Meteonet): """ def __init__(self, domain="NW", variable="rainfall", year=2016, month=8, part=3): - url = "{url}/radar/radar_coords_{domain}.npz".format( - url=self.URL, domain=domain - ) + url = "{url}/radar/radar_coords_{domain}.npz".format(url=self.URL, domain=domain) coords = np.load(download_and_cache(url), allow_pickle=True) diff --git a/climetlab/datasets/meteonet_samples/styles/meteonet-radar-rainfall.yaml b/src/climetlab/datasets/meteonet_samples/styles/meteonet-radar-rainfall.yaml similarity index 100% rename from climetlab/datasets/meteonet_samples/styles/meteonet-radar-rainfall.yaml rename to src/climetlab/datasets/meteonet_samples/styles/meteonet-radar-rainfall.yaml diff --git a/climetlab/datasets/meteonet_samples/weather_models.py b/src/climetlab/datasets/meteonet_samples/weather_models.py similarity index 88% rename from climetlab/datasets/meteonet_samples/weather_models.py rename to src/climetlab/datasets/meteonet_samples/weather_models.py index 4ab2b2e8..18a85186 100644 --- a/climetlab/datasets/meteonet_samples/weather_models.py +++ b/src/climetlab/datasets/meteonet_samples/weather_models.py @@ -18,9 +18,7 @@ class MeteonetWeatherModels(Meteonet): See https://github.com/meteofrance/meteonet """ - def __init__( - self, model="arome", variable="2m", domain="NW", date="20180501", time="0000" - ): + def __init__(self, model="arome", variable="2m", domain="NW", date="20180501", time="0000"): url = "{url}/weather_models/{model}_{variable}_{domain}_{date}{time}00.grib".format( url=self.URL, variable=variable, diff --git a/climetlab/datasets/sample-bufr-data.yaml b/src/climetlab/datasets/sample-bufr-data.yaml similarity index 100% rename from climetlab/datasets/sample-bufr-data.yaml rename to src/climetlab/datasets/sample-bufr-data.yaml diff --git a/climetlab/datasets/sample-grib-data.yaml b/src/climetlab/datasets/sample-grib-data.yaml similarity index 100% rename from climetlab/datasets/sample-grib-data.yaml rename to src/climetlab/datasets/sample-grib-data.yaml diff --git a/climetlab/datasets/weather_bench.py b/src/climetlab/datasets/weather_bench.py similarity index 100% rename from climetlab/datasets/weather_bench.py rename to src/climetlab/datasets/weather_bench.py diff --git a/climetlab/debug.py b/src/climetlab/debug.py similarity index 100% rename from climetlab/debug.py rename to src/climetlab/debug.py diff --git a/climetlab/decorators.py b/src/climetlab/decorators.py similarity index 96% rename from climetlab/decorators.py rename to src/climetlab/decorators.py index 371a0868..7cb19c8f 100644 --- a/climetlab/decorators.py +++ b/src/climetlab/decorators.py @@ -167,13 +167,9 @@ def f(**kwargs): kwargs = f(**kwargs) if "time" in kwargs: - 
kwargs["time"] = {False: _normalize_time, True: _normalize_time_as_tuple}[ - as_tuple - ](kwargs["time"], int) + kwargs["time"] = {False: _normalize_time, True: _normalize_time_as_tuple}[as_tuple](kwargs["time"], int) if "expver" in kwargs: - kwargs["expver"] = {False: _normalize_expver, True: _normalize_expver_as_tuple}[ - as_tuple - ](kwargs["expver"]) + kwargs["expver"] = {False: _normalize_expver, True: _normalize_expver_as_tuple}[as_tuple](kwargs["expver"]) return kwargs @@ -236,9 +232,7 @@ def __init__( self.name = name if isinstance(values, str): - assert ( - kwargs.get("type") is None - ), f"Cannot mix values={values} and type={kwargs.get('type')}" + assert kwargs.get("type") is None, f"Cannot mix values={values} and type={kwargs.get('type')}" if "(" in values: m = re.match(r"(.+)\((.+)\)", values) type = m.group(1) diff --git a/climetlab/distributed/__init__.py b/src/climetlab/distributed/__init__.py similarity index 100% rename from climetlab/distributed/__init__.py rename to src/climetlab/distributed/__init__.py diff --git a/climetlab/distributed/dask.py b/src/climetlab/distributed/dask.py similarity index 94% rename from climetlab/distributed/dask.py rename to src/climetlab/distributed/dask.py index 93682bc0..5604de8f 100644 --- a/climetlab/distributed/dask.py +++ b/src/climetlab/distributed/dask.py @@ -108,9 +108,7 @@ def __str__(self): def _repr_html_(self): dashboard = self.cluster.dashboard_link - dashboard = re.sub( - "http://[0-9\\.]*:", "https://localhost:48167/proxy/", dashboard - ) + dashboard = re.sub("http://[0-9\\.]*:", "https://localhost:48167/proxy/", dashboard) return f"Cluster={self.cluster}, Client={self.client}, Dashboard:{dashboard}" @property @@ -121,9 +119,7 @@ def dashboard_link(self): def start_dask(name_or_yaml_filename, **kwargs): if len(CURRENT_DEPLOYS) > 0: - LOG.warn( - f"Creating multiple dask clusters ({len(CURRENT_DEPLOYS)}) already running)." - ) + LOG.warn(f"Creating multiple dask clusters ({len(CURRENT_DEPLOYS)}) already running).") _, ext = os.path.splitext(name_or_yaml_filename) if ext in (".yaml", ".yml"): diff --git a/climetlab/exceptions.py b/src/climetlab/exceptions.py similarity index 100% rename from climetlab/exceptions.py rename to src/climetlab/exceptions.py diff --git a/climetlab/grids/__init__.py b/src/climetlab/grids/__init__.py similarity index 97% rename from climetlab/grids/__init__.py rename to src/climetlab/grids/__init__.py index 2b055a2b..4ced87db 100644 --- a/climetlab/grids/__init__.py +++ b/src/climetlab/grids/__init__.py @@ -168,9 +168,7 @@ def ecef(lat, lon, i): def unstructed_to_structed(grib, chunk_size=-1): now = time.time() print("----") - xyz = np.array( - [ecef(lat, lon, i) for i, (lat, lon) in enumerate(grib.iterate_grid_points())] - ) + xyz = np.array([ecef(lat, lon, i) for i, (lat, lon) in enumerate(grib.iterate_grid_points())]) print("----", time.time() - now) print(len(xyz)) diff --git a/climetlab/indexing/__init__.py b/src/climetlab/indexing/__init__.py similarity index 77% rename from climetlab/indexing/__init__.py rename to src/climetlab/indexing/__init__.py index 67b7e3cf..4f8adbd4 100644 --- a/climetlab/indexing/__init__.py +++ b/src/climetlab/indexing/__init__.py @@ -12,12 +12,8 @@ class GlobalIndex: def __init__(self, index_location, baseurl) -> None: - warnings.warn( - "GlobalIndex is obsolete. Please update your code and use the 'directory' source" - ) - raise NotImplementedError( - "GlobalIndex is obsolete. 
Please update your code and use the 'directory' source" - ) + warnings.warn("GlobalIndex is obsolete. Please update your code and use the 'directory' source") + raise NotImplementedError("GlobalIndex is obsolete. Please update your code and use the 'directory' source") """The GloblaIndex has one index managing multiple urls/files. This unique index is found at "index_location" The path of each file is written in the index as a relative path. diff --git a/climetlab/indexing/cube.py b/src/climetlab/indexing/cube.py similarity index 92% rename from climetlab/indexing/cube.py rename to src/climetlab/indexing/cube.py index 176ff42d..eb46a72a 100644 --- a/climetlab/indexing/cube.py +++ b/src/climetlab/indexing/cube.py @@ -86,9 +86,7 @@ def __init__( # Get a mapping of user names to unique values # With possible reduce dimentionality if the user use 'level+param' - self.user_coords = self.source.unique_values( - *names, remapping=remapping, patches=patches - ) + self.user_coords = self.source.unique_values(*names, remapping=remapping, patches=patches) self.user_shape = tuple(len(v) for k, v in self.user_coords.items()) @@ -96,9 +94,7 @@ def __init__( details = [] for key, v in self.user_coords.items(): details.append(f"{key=} ({len(v)}) {v}") - assert not isinstance( - self.source, str - ), f"Not expecting a str here ({self.source})" + assert not isinstance(self.source, str), f"Not expecting a str here ({self.source})" for i, f in enumerate(self.source): details.append(f"{i}={f} {f.metadata('number')}") if i > 30: @@ -214,8 +210,7 @@ def iterate_cubelets(self, reading_chunks=None, **kwargs): indexes = list(range(0, len(lst)) for lst in names) return ( - Cubelet(self, i, coords_names=n) - for n, i in zip(itertools.product(*names), itertools.product(*indexes)) + Cubelet(self, i, coords_names=n) for n, i in zip(itertools.product(*names), itertools.product(*indexes)) ) def chunking(self, chunks): @@ -255,15 +250,11 @@ def __init__(self, cube, coords, coords_names=None): self.flatten_values = cube.flatten_values def __repr__(self): - return ( - f"{self.__class__.__name__}({self.coords},index_names={self._coords_names})" - ) + return f"{self.__class__.__name__}({self.coords},index_names={self._coords_names})" @property def extended_icoords(self): return self.coords def to_numpy(self, **kwargs): - return self.owner[self.coords].to_numpy( - reshape=not self.flatten_values, **kwargs - ) + return self.owner[self.coords].to_numpy(reshape=not self.flatten_values, **kwargs) diff --git a/climetlab/indexing/database/__init__.py b/src/climetlab/indexing/database/__init__.py similarity index 100% rename from climetlab/indexing/database/__init__.py rename to src/climetlab/indexing/database/__init__.py diff --git a/climetlab/indexing/database/json.py b/src/climetlab/indexing/database/json.py similarity index 100% rename from climetlab/indexing/database/json.py rename to src/climetlab/indexing/database/json.py diff --git a/climetlab/indexing/database/sql.py b/src/climetlab/indexing/database/sql.py similarity index 94% rename from climetlab/indexing/database/sql.py rename to src/climetlab/indexing/database/sql.py index 935a2edf..1225565d 100644 --- a/climetlab/indexing/database/sql.py +++ b/src/climetlab/indexing/database/sql.py @@ -24,16 +24,14 @@ from climetlab.utils import tqdm from climetlab.utils.parts import Part -from . import ( - FILEPARTS_KEY_NAMES, - MORE_KEY_NAMES, - MORE_KEY_NAMES_WITH_UNDERSCORE, - STATISTICS_KEY_NAMES, - Database, - FloatDBKey, - IntDBKey, - StrDBKey, -) +from . 
import FILEPARTS_KEY_NAMES +from . import MORE_KEY_NAMES +from . import MORE_KEY_NAMES_WITH_UNDERSCORE +from . import STATISTICS_KEY_NAMES +from . import Database +from . import FloatDBKey +from . import IntDBKey +from . import StrDBKey LOG = logging.getLogger(__name__) @@ -43,10 +41,7 @@ def dump_sql(statement): statement = statement.replace(";", " ;") statement = statement.replace("(", " (") lst = statement.split() - lst = [ - "userorder_entries_...\n" if x.startswith("userorder_entries") else x - for x in lst - ] + lst = ["userorder_entries_...\n" if x.startswith("userorder_entries") else x for x in lst] print(" ".join(lst)) @@ -130,9 +125,7 @@ def patch(self): pass try: - execute( - self.connection, "ALTER TABLE entries DROP COLUMN i_param_levelist;" - ) + execute(self.connection, "ALTER TABLE entries DROP COLUMN i_param_levelist;") except sqlite3.OperationalError: pass @@ -180,9 +173,7 @@ def create_table_from_entry_if_needed(self, entry): def _add_column(self, k, v): dbkey = self._build_dbkey(k, v) - statement = ( - f"ALTER TABLE {self.table_name} ADD COLUMN {dbkey.name} {dbkey.sql_type};" - ) + statement = f"ALTER TABLE {self.table_name} ADD COLUMN {dbkey.name} {dbkey.sql_type};" LOG.debug("%s", statement) try: execute(self.connection, statement) @@ -270,11 +261,7 @@ def h(self, *args, **kwargs): m.update(str(kwargs).encode("utf-8")) m.update(str(self.remapping.as_dict()).encode("utf-8")) m.update(str(self.__class__.__name__).encode("utf-8")) - m.update( - json.dumps(self.kwargs, sort_keys=True, default=json_serialiser).encode( - "utf-8" - ) - ) + m.update(json.dumps(self.kwargs, sort_keys=True, default=json_serialiser).encode("utf-8")) return m.hexdigest() def __str__(self): @@ -287,9 +274,7 @@ def is_empty(self): def create_new_view(self, db, view): new_view = "entries_" + self.h(parent_view=view) assert new_view != view - view_statement = self.create_view_statement( - db, old_view=view, new_view=new_view - ) + view_statement = self.create_view_statement(db, old_view=view, new_view=new_view) if not view_statement: # nothing to do return view @@ -360,10 +345,7 @@ def join(self, lst): return None assert new_view != old_view - return ( - f"CREATE TEMP VIEW IF NOT EXISTS {new_view} AS SELECT *, {select} " - f"FROM {old_view};" - ) + return f"CREATE TEMP VIEW IF NOT EXISTS {new_view} AS SELECT *, {select} " f"FROM {old_view};" class SqlOrder(SqlFilter): @@ -534,9 +516,7 @@ def unique_values(self, *coords, remapping=None, patches=None, progress_bar=True results = {} for c in coords: column = entryname_to_dbname(c) - values = [ - v[0] for v in execute(con, f"SELECT DISTINCT {column} FROM {view};") - ] + values = [v[0] for v in execute(con, f"SELECT DISTINCT {column} FROM {view};")] LOG.debug("Reordered values for {column}", column, values) results[column] = values diff --git a/climetlab/indexing/fieldset.py b/src/climetlab/indexing/fieldset.py similarity index 94% rename from climetlab/indexing/fieldset.py rename to src/climetlab/indexing/fieldset.py index bfdedcc6..951bc929 100644 --- a/climetlab/indexing/fieldset.py +++ b/src/climetlab/indexing/fieldset.py @@ -8,7 +8,9 @@ # from climetlab.core import Base -from climetlab.core.index import Index, MaskIndex, MultiIndex +from climetlab.core.index import Index +from climetlab.core.index import MaskIndex +from climetlab.core.index import MultiIndex class Field(Base): diff --git a/climetlab/mergers/__init__.py b/src/climetlab/mergers/__init__.py similarity index 100% rename from climetlab/mergers/__init__.py rename to 
src/climetlab/mergers/__init__.py diff --git a/climetlab/mergers/pandas.py b/src/climetlab/mergers/pandas.py similarity index 100% rename from climetlab/mergers/pandas.py rename to src/climetlab/mergers/pandas.py diff --git a/climetlab/mergers/tfdataset.py b/src/climetlab/mergers/tfdataset.py similarity index 100% rename from climetlab/mergers/tfdataset.py rename to src/climetlab/mergers/tfdataset.py diff --git a/climetlab/mergers/xarray.py b/src/climetlab/mergers/xarray.py similarity index 91% rename from climetlab/mergers/xarray.py rename to src/climetlab/mergers/xarray.py index 931a3d61..8747e0a4 100644 --- a/climetlab/mergers/xarray.py +++ b/src/climetlab/mergers/xarray.py @@ -64,18 +64,14 @@ def merge( user_kwargs=kwargs, ) - if reader_class is not None and hasattr( - reader_class, "to_xarray_multi_from_sources" - ): + if reader_class is not None and hasattr(reader_class, "to_xarray_multi_from_sources"): return reader_class.to_xarray_multi_from_sources( sources, **options, ) if paths is not None: - if reader_class is not None and hasattr( - reader_class, "to_xarray_multi_from_paths" - ): + if reader_class is not None and hasattr(reader_class, "to_xarray_multi_from_paths"): return reader_class.to_xarray_multi_from_paths( paths, **options, diff --git a/climetlab/metview/__init__.py b/src/climetlab/metview/__init__.py similarity index 100% rename from climetlab/metview/__init__.py rename to src/climetlab/metview/__init__.py diff --git a/climetlab/mirrors/__init__.py b/src/climetlab/mirrors/__init__.py similarity index 92% rename from climetlab/mirrors/__init__.py rename to src/climetlab/mirrors/__init__.py index eccdd227..7cf83840 100644 --- a/climetlab/mirrors/__init__.py +++ b/src/climetlab/mirrors/__init__.py @@ -21,9 +21,7 @@ def __init__(self, mirror, source): def get_file(self, create, args): if self.resource(): - LOG.debug( - f"Found a copy of {self.source} in mirror {self.mirror}: {self.resource()}." - ) + LOG.debug(f"Found a copy of {self.source} in mirror {self.mirror}: {self.resource()}.") return self.resource() if not self.mirror._prefetch: LOG.debug(f"No copy of {self.source} into {self.mirror}: prefetch=False.") @@ -36,9 +34,7 @@ def resource(self): return None def create_copy(self, create, args): - LOG.info( - f"Not implemented. Not creating anything for {self.source} in mirror {self.mirror}." - ) + LOG.info(f"Not implemented. Not creating anything for {self.source} in mirror {self.mirror}.") return None diff --git a/climetlab/mirrors/directory_mirror.py b/src/climetlab/mirrors/directory_mirror.py similarity index 98% rename from climetlab/mirrors/directory_mirror.py rename to src/climetlab/mirrors/directory_mirror.py index 02771846..54567c96 100644 --- a/climetlab/mirrors/directory_mirror.py +++ b/src/climetlab/mirrors/directory_mirror.py @@ -15,7 +15,8 @@ from climetlab.sources.file import FileSource from climetlab.sources.url import Url -from . import BaseMirror, MirrorConnection +from . import BaseMirror +from . 
import MirrorConnection LOG = logging.getLogger(__name__) diff --git a/climetlab/ml/__init__.py b/src/climetlab/ml/__init__.py similarity index 100% rename from climetlab/ml/__init__.py rename to src/climetlab/ml/__init__.py diff --git a/climetlab/ml/data_io.py b/src/climetlab/ml/data_io.py similarity index 98% rename from climetlab/ml/data_io.py rename to src/climetlab/ml/data_io.py index b09e0c61..9d52af28 100644 --- a/climetlab/ml/data_io.py +++ b/src/climetlab/ml/data_io.py @@ -7,7 +7,8 @@ import torch -from .utils import as_numpy_func, default_merger +from .utils import as_numpy_func +from .utils import default_merger class TorchDataset(torch.utils.data.Dataset): diff --git a/climetlab/ml/filters.py b/src/climetlab/ml/filters.py similarity index 83% rename from climetlab/ml/filters.py rename to src/climetlab/ml/filters.py index abc587a9..abcf4112 100644 --- a/climetlab/ml/filters.py +++ b/src/climetlab/ml/filters.py @@ -56,16 +56,16 @@ def fn(i): return fn -class NormaliseMeanStdFilter(YAFilter): - def __init__(self, source): - a, b = compute_mean_std_from_source(source) # noqa: TODO - super().__init__(source, a, b) +# class NormaliseMeanStdFilter(YAFilter): +# def __init__(self, source): +# a, b = compute_mean_std_from_source(source) # noqa: TODO +# super().__init__(source, a, b) -class NormaliseMinMaxFilter(YAFilter): - def __init__(self, source): - a, b = compute_min_max_from_source(source) # noqa: TODO - super().__init__(source, a, b) +# class NormaliseMinMaxFilter(YAFilter): +# def __init__(self, source): +# a, b = compute_min_max_from_source(source) # noqa: TODO +# super().__init__(source, a, b) class OffsetFilter(YAFilter): diff --git a/climetlab/ml/tf.py b/src/climetlab/ml/tf.py similarity index 87% rename from climetlab/ml/tf.py rename to src/climetlab/ml/tf.py index 5bd16de2..59f38675 100644 --- a/climetlab/ml/tf.py +++ b/src/climetlab/ml/tf.py @@ -23,7 +23,5 @@ def __init__(self, *args, **kwargs): def call(self, inputs): w, h = self.kernel_size inputs = tf.concat([inputs, inputs[:, :, :w, :]], axis=2) - inputs = tf.pad( - inputs, [[0, 0], [h // 2, h // 2], [0, 0], [0, 0]], constant_values=0 - ) + inputs = tf.pad(inputs, [[0, 0], [h // 2, h // 2], [0, 0], [0, 0]], constant_values=0) return super().call(inputs) diff --git a/climetlab/ml/torch.py b/src/climetlab/ml/torch.py similarity index 100% rename from climetlab/ml/torch.py rename to src/climetlab/ml/torch.py diff --git a/climetlab/ml/utils.py b/src/climetlab/ml/utils.py similarity index 82% rename from climetlab/ml/utils.py rename to src/climetlab/ml/utils.py index 57b7d587..29791829 100644 --- a/climetlab/ml/utils.py +++ b/src/climetlab/ml/utils.py @@ -76,9 +76,7 @@ def wrap(i): def normalize_a_b(option, dataset): - if isinstance(option, (tuple, list)) and all( - [isinstance(x, Number) for x in option] - ): + if isinstance(option, (tuple, list)) and all([isinstance(x, Number) for x in option]): a, b = option return a, b @@ -86,9 +84,7 @@ def normalize_a_b(option, dataset): stats = dataset.statistics() average, stdev = stats["average"], stats["stdev"] if stdev < (average * 1e-6): - warnings.warn( - f"Normalizing: the field seems to have only one value {stats}" - ) + warnings.warn(f"Normalizing: the field seems to have only one value {stats}") return 1 / stdev, -average / stdev if option == "min-max": @@ -96,9 +92,7 @@ def normalize_a_b(option, dataset): mini, maxi = stats["minimum"], stats["maximum"] x = maxi - mini if x < 1e-9: - warnings.warn( - f"Normalizing: the field seems to have only one value {stats}." 
- ) + warnings.warn(f"Normalizing: the field seems to have only one value {stats}.") return 1 / x, -mini / x raise ValueError(option) @@ -116,13 +110,8 @@ def to_funcs(features, targets, options, targets_options, merger, targets_merger assert isinstance(features, (list, tuple)), features assert len(features) == len(options), (len(features), len(options)) - funcs = [ - as_numpy_func(_, opt) for _, opt in zip_longest(features, options, fillvalue={}) - ] - funcs_targets = [ - as_numpy_func(_, opt) - for _, opt in zip_longest(targets, targets_options, fillvalue={}) - ] + funcs = [as_numpy_func(_, opt) for _, opt in zip_longest(features, options, fillvalue={})] + funcs_targets = [as_numpy_func(_, opt) for _, opt in zip_longest(targets, targets_options, fillvalue={})] func = merger(*funcs) func_targets = targets_merger(*funcs_targets) diff --git a/climetlab/mockup.py b/src/climetlab/mockup.py similarity index 100% rename from climetlab/mockup.py rename to src/climetlab/mockup.py diff --git a/climetlab/normalize.py b/src/climetlab/normalize.py similarity index 90% rename from climetlab/normalize.py rename to src/climetlab/normalize.py index 2397e23a..2c6b0dc0 100644 --- a/climetlab/normalize.py +++ b/src/climetlab/normalize.py @@ -14,9 +14,7 @@ class normalize_args: def __init__(self, **dic): - warnings.warn( - "Deprecated decorator @normalize_arg. Use @normalise on each argument instead." - ) + warnings.warn("Deprecated decorator @normalize_arg. Use @normalise on each argument instead.") self.decorators = [] for name, values in dic.items(): if isinstance(values, list): diff --git a/climetlab/notebook/__init__.py b/src/climetlab/notebook/__init__.py similarity index 100% rename from climetlab/notebook/__init__.py rename to src/climetlab/notebook/__init__.py diff --git a/climetlab/notebook/table.py b/src/climetlab/notebook/table.py similarity index 97% rename from climetlab/notebook/table.py rename to src/climetlab/notebook/table.py index c9936110..b845a497 100644 --- a/climetlab/notebook/table.py +++ b/src/climetlab/notebook/table.py @@ -38,5 +38,5 @@ def _repr_html_(self): def render(self, element): e = element.render() - src, attrs = e._repr_png_() + src, _ = e._repr_png_() return f'' diff --git a/climetlab/plotting/__init__.py b/src/climetlab/plotting/__init__.py similarity index 92% rename from climetlab/plotting/__init__.py rename to src/climetlab/plotting/__init__.py index 98d346ee..133f3373 100644 --- a/climetlab/plotting/__init__.py +++ b/src/climetlab/plotting/__init__.py @@ -12,8 +12,10 @@ from collections import defaultdict from functools import partial -from climetlab.core.data import data_entries, get_data_entry -from climetlab.core.ipython import Image, display +from climetlab.core.data import data_entries +from climetlab.core.data import get_data_entry +from climetlab.core.ipython import Image +from climetlab.core.ipython import display from climetlab.core.settings import SETTINGS from climetlab.core.temporary import temp_file from climetlab.wrappers import get_wrapper @@ -114,9 +116,7 @@ def files_to_apng(files, path, fps): try: from numpngw import write_apng except ImportError: - raise RuntimeError( - "Package numpngw is required to save animated PNGs (pip install numpngw)" - ) + raise RuntimeError("Package numpngw is required to save animated PNGs (pip install numpngw)") frames = [imageio.imread(f) for f in files] @@ -140,9 +140,7 @@ def files_to_opencv(files, path, fps, cccc): try: import cv2 except ImportError: - raise RuntimeError( - "Package OpenCV is required to save movies 
(pip install opencv-python)" - ) + raise RuntimeError("Package OpenCV is required to save movies (pip install opencv-python)") frame = cv2.imread(files[0]) height, width, _ = frame.shape @@ -172,10 +170,7 @@ def files_to_opencv(files, path, fps, cccc): def unsupported(files, path, fps): - raise NotImplementedError( - f"Unsupported format for '{path}'." - f" Supported formats are {list(CODECS.keys())}" - ) + raise NotImplementedError(f"Unsupported format for '{path}'." f" Supported formats are {list(CODECS.keys())}") def files_to_movie(files, path, fps): @@ -268,9 +263,7 @@ def render(self, step, path): try: import imageio except ImportError: - raise RuntimeError( - "Package imageio is required to create animations (pip install imageio)" - ) + raise RuntimeError("Package imageio is required to create animations (pip install imageio)") import numpy as np WHITE = {1: 255} diff --git a/climetlab/plotting/backends/__init__.py b/src/climetlab/plotting/backends/__init__.py similarity index 100% rename from climetlab/plotting/backends/__init__.py rename to src/climetlab/plotting/backends/__init__.py diff --git a/climetlab/plotting/backends/magics/__init__.py b/src/climetlab/plotting/backends/magics/__init__.py similarity index 100% rename from climetlab/plotting/backends/magics/__init__.py rename to src/climetlab/plotting/backends/magics/__init__.py diff --git a/climetlab/plotting/backends/magics/actions.py b/src/climetlab/plotting/backends/magics/actions.py similarity index 92% rename from climetlab/plotting/backends/magics/actions.py rename to src/climetlab/plotting/backends/magics/actions.py index 5ec67832..d0fdbaf9 100644 --- a/climetlab/plotting/backends/magics/actions.py +++ b/src/climetlab/plotting/backends/magics/actions.py @@ -20,9 +20,7 @@ class NoMagics: def plot(self, *args, **kwargs): - raise NotImplementedError( - "Magics was not loaded successfully, plotting is not supported." 
- ) + raise NotImplementedError("Magics was not loaded successfully, plotting is not supported.") try: @@ -62,9 +60,7 @@ def action(self): return self.__class__.__name__ def execute(self): - return getattr(macro, self.action)( - **convert(self.action, self.kwargs) - ).execute() + return getattr(macro, self.action)(**convert(self.action, self.kwargs)).execute() def update(self, action, values): if not isinstance(self, action): diff --git a/climetlab/plotting/backends/magics/apply.py b/src/climetlab/plotting/backends/magics/apply.py similarity index 93% rename from climetlab/plotting/backends/magics/apply.py rename to src/climetlab/plotting/backends/magics/apply.py index 6b428be4..c2c3a517 100644 --- a/climetlab/plotting/backends/magics/apply.py +++ b/src/climetlab/plotting/backends/magics/apply.py @@ -108,17 +108,13 @@ def _apply_dict(*, value, collection, action, default, target, options): # noqa action, special = _find_action(value, action) if special: if special != len(value): - raise ValueError( - "Cannot set some attributes and override others %r" % list(value.keys()) - ) + raise ValueError("Cannot set some attributes and override others %r" % list(value.keys())) result = target.update(action, value) if result is not None: return result - raise ValueError( - "Cannot override attributes %r (no matching style)" % list(value.keys()) - ) + raise ValueError("Cannot override attributes %r (no matching style)" % list(value.keys())) return action(**value) @@ -146,8 +142,7 @@ def _apply_string(*, value, collection, action, default, target, options): actions = list(magics.keys()) if len(actions) != 1: raise ValueError( - "%s %s: one, and only one magics action can be defined in a yaml file: %r" - % (collection, value, actions) + "%s %s: one, and only one magics action can be defined in a yaml file: %r" % (collection, value, actions) ) name = actions[0] diff --git a/climetlab/plotting/backends/magics/backend.py b/src/climetlab/plotting/backends/magics/backend.py similarity index 94% rename from climetlab/plotting/backends/magics/backend.py rename to src/climetlab/plotting/backends/magics/backend.py index 34fb9c93..31a739fb 100644 --- a/climetlab/plotting/backends/magics/backend.py +++ b/src/climetlab/plotting/backends/magics/backend.py @@ -13,20 +13,27 @@ import yaml import climetlab -from climetlab.core.ipython import SVG, Image +from climetlab.core.ipython import SVG +from climetlab.core.ipython import Image from climetlab.core.metadata import annotation from climetlab.core.temporary import temp_file from climetlab.utils.bbox import BoundingBox -from .actions import mcoast, mgrib, minput, mmap, mnetcdf, mtable, mtext, output, plot +from .actions import mcoast +from .actions import mgrib +from .actions import minput +from .actions import mmap +from .actions import mnetcdf +from .actions import mtable +from .actions import mtext +from .actions import output +from .actions import plot from .apply import apply LOG = logging.getLogger(__name__) -os.environ["MAGICS_UNITS_CONVERSIONS"] = os.path.join( - os.path.dirname(climetlab.__file__), "config", "units.yaml" -) +os.environ["MAGICS_UNITS_CONVERSIONS"] = os.path.join(os.path.dirname(climetlab.__file__), "config", "units.yaml") class Layer: @@ -162,14 +169,10 @@ def plot_numpy( west_east_increment = metadata.get("west_east_increment") if south_north_increment is None: - south_north_increment = (north - metadata.get("south", -90)) / ( - data.shape[-2] - 1 - ) + south_north_increment = (north - metadata.get("south", -90)) / (data.shape[-2] - 1) if 
west_east_increment is None: - west_east_increment = (metadata.get("east", 360) - west) / ( - data.shape[-1] - 1 - ) + west_east_increment = (metadata.get("east", 360) - west) / (data.shape[-1] - 1) # TODO: remove me when Magics supports full json def tidy(x): @@ -274,13 +277,9 @@ def apply_options(self, options): if options.provided("bounding_box"): bbox = options["bounding_box"] if isinstance(bbox, (list, tuple)): - self.bounding_box( - north=bbox[0], west=bbox[1], south=bbox[2], east=bbox[3] - ) + self.bounding_box(north=bbox[0], west=bbox[1], south=bbox[2], east=bbox[3]) else: - self.bounding_box( - north=bbox.north, west=bbox.west, south=bbox.south, east=bbox.east - ) + self.bounding_box(north=bbox.north, west=bbox.west, south=bbox.south, east=bbox.east) def option(self, name, default=None): return self._options(name, default) @@ -441,11 +440,7 @@ def save(self, path): file=f, ) else: - print( - yaml.dump( - dict(plot=[a.to_yaml() for a in args]), default_flow_style=False - ) - ) + print(yaml.dump(dict(plot=[a.to_yaml() for a in args]), default_flow_style=False)) self._options.check_unused() diff --git a/climetlab/plotting/backends/magics/colour.py b/src/climetlab/plotting/backends/magics/colour.py similarity index 100% rename from climetlab/plotting/backends/magics/colour.py rename to src/climetlab/plotting/backends/magics/colour.py diff --git a/climetlab/plotting/backends/magics/convertions.py b/src/climetlab/plotting/backends/magics/convertions.py similarity index 100% rename from climetlab/plotting/backends/magics/convertions.py rename to src/climetlab/plotting/backends/magics/convertions.py diff --git a/climetlab/plotting/backends/magics/magics.yaml b/src/climetlab/plotting/backends/magics/magics.yaml similarity index 99% rename from climetlab/plotting/backends/magics/magics.yaml rename to src/climetlab/plotting/backends/magics/magics.yaml index dc49f854..8241dab7 100644 --- a/climetlab/plotting/backends/magics/magics.yaml +++ b/src/climetlab/plotting/backends/magics/magics.yaml @@ -2310,4 +2310,3 @@ mtable: - default: true name: table_binning type: Bool - diff --git a/climetlab/plotting/options.py b/src/climetlab/plotting/options.py similarity index 100% rename from climetlab/plotting/options.py rename to src/climetlab/plotting/options.py diff --git a/climetlab/plotting/wms/__init__.py b/src/climetlab/plotting/wms/__init__.py similarity index 100% rename from climetlab/plotting/wms/__init__.py rename to src/climetlab/plotting/wms/__init__.py diff --git a/climetlab/plotting/wms/_folium.py b/src/climetlab/plotting/wms/_folium.py similarity index 96% rename from climetlab/plotting/wms/_folium.py rename to src/climetlab/plotting/wms/_folium.py index 4e258a94..cf9620c7 100644 --- a/climetlab/plotting/wms/_folium.py +++ b/src/climetlab/plotting/wms/_folium.py @@ -4,7 +4,8 @@ from folium.map import Layer from jinja2 import Template -from climetlab.core.ipython import HTML, guess_which_ipython +from climetlab.core.ipython import HTML +from climetlab.core.ipython import guess_which_ipython class SVGOverlay(Layer): @@ -139,8 +140,6 @@ def make_map(path, bbox, **kwargs): if guess_which_ipython()[0] == "deepnote": # For deepnote - html = html.replace("width: 100%;height: 100%", "width: 100%").replace( - "height: 100.0%;", "height: 609px;" - ) + html = html.replace("width: 100%;height: 100%", "width: 100%").replace("height: 100.0%;", "height: 609px;") return HTML(html) diff --git a/climetlab/plotting/wms/_ipyleafet.py b/src/climetlab/plotting/wms/_ipyleafet.py similarity index 100% rename 
from climetlab/plotting/wms/_ipyleafet.py rename to src/climetlab/plotting/wms/_ipyleafet.py diff --git a/climetlab/plotting/wms/wms.j2 b/src/climetlab/plotting/wms/wms.j2 similarity index 100% rename from climetlab/plotting/wms/wms.j2 rename to src/climetlab/plotting/wms/wms.j2 diff --git a/climetlab/plotting/wms/wms.js b/src/climetlab/plotting/wms/wms.js similarity index 100% rename from climetlab/plotting/wms/wms.js rename to src/climetlab/plotting/wms/wms.js diff --git a/climetlab/profiling.py b/src/climetlab/profiling.py similarity index 93% rename from climetlab/profiling.py rename to src/climetlab/profiling.py index e8e3e3e7..55310ded 100644 --- a/climetlab/profiling.py +++ b/src/climetlab/profiling.py @@ -15,7 +15,8 @@ from collections import defaultdict from contextlib import contextmanager -from climetlab.utils.humanize import number, seconds +from climetlab.utils.humanize import number +from climetlab.utils.humanize import seconds PROFILING = int(os.environ.get("CLIMETLAB_PROFILING", 0)) START = time.time() @@ -63,9 +64,7 @@ def __init__(self, name): def __repr__(self): extra = "" if len(self.threads) > 1: - extra = "\n threads:\n %s" % ( - "\n ".join(repr(t) for t in self.threads.values()), - ) + extra = "\n threads:\n %s" % ("\n ".join(repr(t) for t in self.threads.values()),) return "COUNTER [%s], %s%s" % ( self.name, self._c, diff --git a/climetlab/prompt.py b/src/climetlab/prompt.py similarity index 100% rename from climetlab/prompt.py rename to src/climetlab/prompt.py diff --git a/climetlab/readers/__init__.py b/src/climetlab/readers/__init__.py similarity index 97% rename from climetlab/readers/__init__.py rename to src/climetlab/readers/__init__.py index ee2416e5..61ec9834 100644 --- a/climetlab/readers/__init__.py +++ b/src/climetlab/readers/__init__.py @@ -123,9 +123,7 @@ def reader(source, path): if isinstance(reader, str): return _readers()[reader.replace("-", "_")](source, path, None, False) - raise TypeError( - "Provided reader must be a callable or a string, not %s" % type(reader) - ) + raise TypeError("Provided reader must be a callable or a string, not %s" % type(reader)) if os.path.isdir(path): from .directory import DirectoryReader diff --git a/climetlab/readers/archive.py b/src/climetlab/readers/archive.py similarity index 100% rename from climetlab/readers/archive.py rename to src/climetlab/readers/archive.py diff --git a/climetlab/readers/bufr.py b/src/climetlab/readers/bufr.py similarity index 100% rename from climetlab/readers/bufr.py rename to src/climetlab/readers/bufr.py diff --git a/climetlab/readers/bzip.py b/src/climetlab/readers/bzip.py similarity index 100% rename from climetlab/readers/bzip.py rename to src/climetlab/readers/bzip.py diff --git a/climetlab/readers/csv.py b/src/climetlab/readers/csv.py similarity index 100% rename from climetlab/readers/csv.py rename to src/climetlab/readers/csv.py diff --git a/climetlab/readers/directory.py b/src/climetlab/readers/directory.py similarity index 100% rename from climetlab/readers/directory.py rename to src/climetlab/readers/directory.py diff --git a/climetlab/readers/fwf.py b/src/climetlab/readers/fwf.py similarity index 100% rename from climetlab/readers/fwf.py rename to src/climetlab/readers/fwf.py diff --git a/climetlab/readers/grib/__init__.py b/src/climetlab/readers/grib/__init__.py similarity index 73% rename from climetlab/readers/grib/__init__.py rename to src/climetlab/readers/grib/__init__.py index b8b7ad28..f5e5ae9b 100644 --- a/climetlab/readers/grib/__init__.py +++ 
b/src/climetlab/readers/grib/__init__.py @@ -16,3 +16,11 @@ def reader(source, path, magic=None, deeper_check=False): from .reader import GRIBReader return GRIBReader(source, path) + + if deeper_check: + with open(path, "rb") as f: + magic = f.read(1024) + if b"GRIB" in magic: + from .reader import GRIBReader + + return GRIBReader(source, path) diff --git a/climetlab/readers/grib/codes.py b/src/climetlab/readers/grib/codes.py similarity index 96% rename from climetlab/readers/grib/codes.py rename to src/climetlab/readers/grib/codes.py index daa8dc04..714ad868 100644 --- a/climetlab/readers/grib/codes.py +++ b/src/climetlab/readers/grib/codes.py @@ -54,9 +54,7 @@ def to_xyz(lat, lon): cp = np.cos(phi) sp = np.sin(phi) - matrix = np.array( - [[cp * ct, sp, cp * st], [-ct * sp, cp, -sp * st], [-st, 0.0, ct]] - ) + matrix = np.array([[cp * ct, sp, cp * st], [-ct * sp, cp, -sp * st], [-st, 0.0, ct]]) return from_xyz(*np.dot(matrix, to_xyz(lat, lon))) @@ -131,9 +129,7 @@ def get(count): # For some reason, cffi can ge stuck in the GC if that function # needs to be called defined for the first time in a GC thread. try: - _h = eccodes.codes_new_from_samples( - "regular_ll_pl_grib1", eccodes.CODES_PRODUCT_GRIB - ) + _h = eccodes.codes_new_from_samples("regular_ll_pl_grib1", eccodes.CODES_PRODUCT_GRIB) eccodes.codes_release(_h) except: # noqa E722 pass @@ -147,9 +143,7 @@ def __init__(self, handle, path, offset): @classmethod def from_sample(cls, name): - return cls( - eccodes.codes_new_from_samples(name, eccodes.CODES_PRODUCT_GRIB), None, None - ) + return cls(eccodes.codes_new_from_samples(name, eccodes.CODES_PRODUCT_GRIB), None, None) def __del__(self): try: @@ -195,7 +189,11 @@ def get(self, name): if size and size > 1: return eccodes.codes_get_array(self.handle, name) - return eccodes.codes_get(self.handle, name) + result = eccodes.codes_get(self.handle, name) + + if result == "~" and name == "shortName": + return str(self.get("param")) + return result except eccodes.KeyValueNotFoundError: return None @@ -375,9 +373,7 @@ def handle(self): if self._handle_cache is not None: key = (self.path, self._offset) if key not in self._handle_cache: - self._handle_cache[key] = CodesReader.from_cache(self.path).at_offset( - self._offset - ) + self._handle_cache[key] = CodesReader.from_cache(self.path).at_offset(self._offset) return self._handle_cache[key] if self._handle is None: @@ -675,9 +671,7 @@ def grid_points(self): import numpy as np if self.rotated and not self.rotated_iterator: - warnings.warn( - f"ecCodes does not support rotated iterator for {self.grid_type}" - ) + warnings.warn(f"ecCodes does not support rotated iterator for {self.grid_type}") return self.grid_points_unrotated() data = self.data @@ -710,9 +704,7 @@ def grid_points_raw(self): return self.grid_points() if not self.rotated_iterator: - warnings.warn( - f"ecCodes does not support rotated iterator for {self.grid_type}" - ) + warnings.warn(f"ecCodes does not support rotated iterator for {self.grid_type}") data = self.handle.get_data() lat = np.array([d["lat"] for d in data]) lon = np.array([d["lon"] for d in data]) diff --git a/climetlab/readers/grib/fieldset.py b/src/climetlab/readers/grib/fieldset.py similarity index 100% rename from climetlab/readers/grib/fieldset.py rename to src/climetlab/readers/grib/fieldset.py diff --git a/climetlab/readers/grib/index/__init__.py b/src/climetlab/readers/grib/index/__init__.py similarity index 82% rename from climetlab/readers/grib/index/__init__.py rename to 
src/climetlab/readers/grib/index/__init__.py index 429f7263..387d5fc1 100644 --- a/climetlab/readers/grib/index/__init__.py +++ b/src/climetlab/readers/grib/index/__init__.py @@ -14,14 +14,15 @@ from lru import LRU -from climetlab.core.index import Index, MaskIndex, MultiIndex -from climetlab.decorators import normalize_grib_key_values, normalize_grib_keys -from climetlab.indexing.database import ( - FILEPARTS_KEY_NAMES, - MORE_KEY_NAMES, - MORE_KEY_NAMES_WITH_UNDERSCORE, - STATISTICS_KEY_NAMES, -) +from climetlab.core.index import Index +from climetlab.core.index import MaskIndex +from climetlab.core.index import MultiIndex +from climetlab.decorators import normalize_grib_key_values +from climetlab.decorators import normalize_grib_keys +from climetlab.indexing.database import FILEPARTS_KEY_NAMES +from climetlab.indexing.database import MORE_KEY_NAMES +from climetlab.indexing.database import MORE_KEY_NAMES_WITH_UNDERSCORE +from climetlab.indexing.database import STATISTICS_KEY_NAMES from climetlab.indexing.fieldset import FieldSet from climetlab.readers.grib.codes import GribField from climetlab.readers.grib.fieldset import FieldSetMixin @@ -35,9 +36,7 @@ class GribFieldSet(FieldSetMixin, FieldSet): _availability = None def __init__(self, *args, **kwargs): - if self.availability_path is not None and os.path.exists( - self.availability_path - ): + if self.availability_path is not None and os.path.exists(self.availability_path): self._availability = Availability(self.availability_path) Index.__init__(self, *args, **kwargs) @@ -93,9 +92,7 @@ def dicts(): return dict(available=available, missing=missing) - def _custom_availability( - self, keys=None, ignore_keys=None, filter_keys=lambda k: True - ): + def _custom_availability(self, keys=None, ignore_keys=None, filter_keys=lambda k: True): def dicts(): for i in progress_bar( iterable=range(len(self)), @@ -131,19 +128,12 @@ def availability(self): LOG.debug("Building availability") self._availability = self._custom_availability( - ignore_keys=FILEPARTS_KEY_NAMES - + STATISTICS_KEY_NAMES - + MORE_KEY_NAMES_WITH_UNDERSCORE - + MORE_KEY_NAMES + ignore_keys=FILEPARTS_KEY_NAMES + STATISTICS_KEY_NAMES + MORE_KEY_NAMES_WITH_UNDERSCORE + MORE_KEY_NAMES ) return self.availability def is_full_hypercube(self): - non_empty_coords = { - k: v - for k, v in self.availability._tree.unique_values().items() - if len(v) > 1 - } + non_empty_coords = {k: v for k, v in self.availability._tree.unique_values().items() if len(v) > 1} expected_size = math.prod([len(v) for k, v in non_empty_coords.items()]) return len(self) == expected_size @@ -170,14 +160,10 @@ class FieldSetInFiles(GribFieldSet): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - GRIB_FIELD_CACHE_SIZE = int( - os.environ.get("CLIMETLAB_GRIB_FIELD_CACHE_SIZE", 1000) - ) + GRIB_FIELD_CACHE_SIZE = int(os.environ.get("CLIMETLAB_GRIB_FIELD_CACHE_SIZE", 1000)) self._lru_cache = LRU(GRIB_FIELD_CACHE_SIZE) - CLIMETLAB_HANDLE_CACHE_SIZE = int( - os.environ.get("CLIMETLAB_HANDLE_CACHE_SIZE", 10) - ) + CLIMETLAB_HANDLE_CACHE_SIZE = int(os.environ.get("CLIMETLAB_HANDLE_CACHE_SIZE", 10)) self._handle_cache = LRU(CLIMETLAB_HANDLE_CACHE_SIZE) @@ -185,9 +171,7 @@ def _getitem(self, n): # TODO: check if we need a mutex here if n not in self._lru_cache: part = self.part(n) - self._lru_cache[n] = GribField( - part.path, part.offset, part.length, self._handle_cache - ) + self._lru_cache[n] = GribField(part.path, part.offset, part.length, self._handle_cache) return self._lru_cache[n] def __len__(self): 
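Note on the deeper_check branch added to climetlab/readers/grib/__init__.py earlier in this patch: when the usual magic-byte check does not identify the file, the reader now reads the first 1024 bytes and only hands the file to GRIBReader if the bytes b"GRIB" appear in that prefix. A minimal standalone sketch of that detection logic, assuming only the Python standard library; the helper name looks_like_grib and the example path are illustrative and not part of the patch:

    def looks_like_grib(path, probe_size=1024):
        # GRIB1/GRIB2 messages begin with an indicator section whose first
        # four bytes are b"GRIB"; searching a small prefix of the file is
        # enough to decide whether the GRIB reader should claim it, even if
        # a few stray bytes precede the first message.
        with open(path, "rb") as f:
            prefix = f.read(probe_size)
        return b"GRIB" in prefix

    # Hypothetical usage: gate construction of a GRIB reader on the sniff result.
    # if looks_like_grib("sample.grib"):
    #     ...  # build the GRIB reader for this path
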
diff --git a/climetlab/readers/grib/index/db.py b/src/climetlab/readers/grib/index/db.py similarity index 100% rename from climetlab/readers/grib/index/db.py rename to src/climetlab/readers/grib/index/db.py diff --git a/climetlab/readers/grib/index/file.py b/src/climetlab/readers/grib/index/file.py similarity index 100% rename from climetlab/readers/grib/index/file.py rename to src/climetlab/readers/grib/index/file.py diff --git a/climetlab/readers/grib/index/json.py b/src/climetlab/readers/grib/index/json.py similarity index 100% rename from climetlab/readers/grib/index/json.py rename to src/climetlab/readers/grib/index/json.py diff --git a/climetlab/readers/grib/index/sql.py b/src/climetlab/readers/grib/index/sql.py similarity index 85% rename from climetlab/readers/grib/index/sql.py rename to src/climetlab/readers/grib/index/sql.py index 0101a107..3e147189 100644 --- a/climetlab/readers/grib/index/sql.py +++ b/src/climetlab/readers/grib/index/sql.py @@ -11,15 +11,16 @@ from collections import namedtuple from climetlab.core.constants import DATETIME -from climetlab.core.order import build_remapping, normalize_order_by +from climetlab.core.order import build_remapping +from climetlab.core.order import normalize_order_by from climetlab.core.select import normalize_selection -from climetlab.decorators import cached_method, normalize, normalize_grib_key_values -from climetlab.indexing.database.sql import ( - SqlDatabase, - SqlOrder, - SqlRemapping, - SqlSelection, -) +from climetlab.decorators import cached_method +from climetlab.decorators import normalize +from climetlab.decorators import normalize_grib_key_values +from climetlab.indexing.database.sql import SqlDatabase +from climetlab.indexing.database.sql import SqlOrder +from climetlab.indexing.database.sql import SqlRemapping +from climetlab.indexing.database.sql import SqlSelection from climetlab.readers.grib.index.db import FieldsetInFilesWithDBIndex from climetlab.utils.serialise import register_serialisation @@ -96,9 +97,7 @@ def order_by(self, *args, remapping=None, **kwargs): return out def part(self, n): - if self._cache is None or not ( - self._cache.first <= n < self._cache.first + self._cache.length - ): + if self._cache is None or not (self._cache.first <= n < self._cache.first + self._cache.length): first = (n // self.DB_CACHE_SIZE) * self.DB_CACHE_SIZE result = self.db.lookup_parts(limit=self.DB_CACHE_SIZE, offset=first) self._cache = SqlResultCache(first, len(result), result) @@ -106,9 +105,7 @@ def part(self, n): def get_metadata(self, n): if self._dict_cache is None or not ( - self._dict_cache.first - <= n - < self._dict_cache.first + self._dict_cache.length + self._dict_cache.first <= n < self._dict_cache.first + self._dict_cache.length ): first = (n // self.DB_DICT_CACHE_SIZE) * self.DB_DICT_CACHE_SIZE result = self.db.lookup_dicts( @@ -130,7 +127,5 @@ def number_of_parts(self): register_serialisation( FieldsetInFilesWithSqlIndex, lambda x: [x.db.db_path, x.db._filters], - lambda x: FieldsetInFilesWithSqlIndex(db=SqlDatabase(x[0])).apply_filters( - filters=x[1] - ), + lambda x: FieldsetInFilesWithSqlIndex(db=SqlDatabase(x[0])).apply_filters(filters=x[1]), ) diff --git a/climetlab/readers/grib/output.py b/src/climetlab/readers/grib/output.py similarity index 93% rename from climetlab/readers/grib/output.py rename to src/climetlab/readers/grib/output.py index a2499f51..dbf15db8 100644 --- a/climetlab/readers/grib/output.py +++ b/src/climetlab/readers/grib/output.py @@ -11,7 +11,8 @@ import logging import re -from 
climetlab.decorators import normalize, normalize_grib_keys +from climetlab.decorators import normalize +from climetlab.decorators import normalize_grib_keys from climetlab.utils.humanize import list_to_human LOG = logging.getLogger(__name__) @@ -127,9 +128,7 @@ def write( metadata["missingValue"] = missing_value metadata["bitmapPresent"] = 1 - metadata = { - k: v for k, v in sorted(metadata.items(), key=lambda x: order(x[0])) - } + metadata = {k: v for k, v in sorted(metadata.items(), key=lambda x: order(x[0]))} if str(metadata.get("edition")) == "1": for k in NOT_IN_EDITION_1: @@ -145,9 +144,7 @@ def write( # Set values will set generatingProcessIdentifier to 255 if "generatingProcessIdentifier" in metadata: - handle.set( - "generatingProcessIdentifier", metadata["generatingProcessIdentifier"] - ) + handle.set("generatingProcessIdentifier", metadata["generatingProcessIdentifier"]) file, path = self.f(handle) handle.write(file) @@ -199,8 +196,8 @@ def update_metadata(self, handle, metadata, compulsary): if "number" in metadata: compulsary += ("numberOfForecastsInEnsemble",) productDefinitionTemplateNumber = {"tp": 11} - metadata["productDefinitionTemplateNumber"] = ( - productDefinitionTemplateNumber.get(handle.get("shortName"), 1) + metadata["productDefinitionTemplateNumber"] = productDefinitionTemplateNumber.get( + handle.get("shortName"), 1 ) if metadata.get("type") in ("pf", "cf"): @@ -236,9 +233,7 @@ def handle_from_metadata(self, values, metadata, compulsary): elif len(values.shape) == 2: sample = self._ll_field(values, metadata) else: - raise ValueError( - f"Invalid shape {values.shape} for GRIB, must be 1 or 2 dimension " - ) + raise ValueError(f"Invalid shape {values.shape} for GRIB, must be 1 or 2 dimension ") metadata.setdefault("bitsPerValue", 16) metadata["scanningMode"] = 0 @@ -262,12 +257,7 @@ def handle_from_metadata(self, values, metadata, compulsary): ) ) - if ( - "class" in metadata - or "type" in metadata - or "stream" in metadata - or "expver" in metadata - ): + if "class" in metadata or "type" in metadata or "stream" in metadata or "expver" in metadata: # MARS labelling metadata["setLocalDefinition"] = 1 # metadata['grib2LocalSectionNumber'] = 1 diff --git a/climetlab/readers/grib/pandas.py b/src/climetlab/readers/grib/pandas.py similarity index 100% rename from climetlab/readers/grib/pandas.py rename to src/climetlab/readers/grib/pandas.py diff --git a/climetlab/readers/grib/parsing.py b/src/climetlab/readers/grib/parsing.py similarity index 98% rename from climetlab/readers/grib/parsing.py rename to src/climetlab/readers/grib/parsing.py index dee27d46..f58f1fef 100644 --- a/climetlab/readers/grib/parsing.py +++ b/src/climetlab/readers/grib/parsing.py @@ -11,12 +11,14 @@ import os import sys import time -from multiprocessing import Process, Queue +from multiprocessing import Process +from multiprocessing import Queue from tqdm import tqdm from climetlab.utils import progress_bar -from climetlab.utils.humanize import plural, seconds +from climetlab.utils.humanize import plural +from climetlab.utils.humanize import seconds LOG = logging.getLogger(__name__) diff --git a/climetlab/readers/grib/pytorch.py b/src/climetlab/readers/grib/pytorch.py similarity index 92% rename from climetlab/readers/grib/pytorch.py rename to src/climetlab/readers/grib/pytorch.py index de971925..40e54d34 100644 --- a/climetlab/readers/grib/pytorch.py +++ b/src/climetlab/readers/grib/pytorch.py @@ -11,7 +11,8 @@ import numpy as np -from .tensorflow import default_merger, to_funcs +from 
.tensorflow import default_merger +from .tensorflow import to_funcs LOG = logging.getLogger(__name__) @@ -36,9 +37,7 @@ def to_pytorch( import torch - func, func_targets = to_funcs( - features, targets, options, targets_options, merger, targets_merger - ) + func, func_targets = to_funcs(features, targets, options, targets_options, merger, targets_merger) class ClimetlabTorchDataset(torch.utils.data.Dataset): def __len__(self): diff --git a/climetlab/readers/grib/reader.py b/src/climetlab/readers/grib/reader.py similarity index 100% rename from climetlab/readers/grib/reader.py rename to src/climetlab/readers/grib/reader.py diff --git a/climetlab/readers/grib/tensorflow.py b/src/climetlab/readers/grib/tensorflow.py similarity index 91% rename from climetlab/readers/grib/tensorflow.py rename to src/climetlab/readers/grib/tensorflow.py index 1bc198c3..c08abf62 100644 --- a/climetlab/readers/grib/tensorflow.py +++ b/src/climetlab/readers/grib/tensorflow.py @@ -147,9 +147,7 @@ def wrap(i): def normalize_a_b(option, dataset): - if isinstance(option, (tuple, list)) and all( - [isinstance(x, Number) for x in option] - ): + if isinstance(option, (tuple, list)) and all([isinstance(x, Number) for x in option]): a, b = option return a, b @@ -157,9 +155,7 @@ def normalize_a_b(option, dataset): stats = dataset.statistics() average, stdev = stats["average"], stats["stdev"] if stdev < (average * 1e-6): - warnings.warn( - f"Normalizing: the field seems to have only one value {stats}" - ) + warnings.warn(f"Normalizing: the field seems to have only one value {stats}") return 1 / stdev, -average / stdev if option == "min-max": @@ -167,9 +163,7 @@ def normalize_a_b(option, dataset): mini, maxi = stats["minimum"], stats["maximum"] x = maxi - mini if x < 1e-9: - warnings.warn( - f"Normalizing: the field seems to have only one value {stats}." 
- ) + warnings.warn(f"Normalizing: the field seems to have only one value {stats}.") return 1 / x, -mini / x raise ValueError(option) @@ -187,13 +181,8 @@ def to_funcs(features, targets, options, targets_options, merger, targets_merger assert isinstance(features, (list, tuple)), features assert len(features) == len(options), (len(features), len(options)) - funcs = [ - as_numpy_func(_, opt) for _, opt in zip_longest(features, options, fillvalue={}) - ] - funcs_targets = [ - as_numpy_func(_, opt) - for _, opt in zip_longest(targets, targets_options, fillvalue={}) - ] + funcs = [as_numpy_func(_, opt) for _, opt in zip_longest(features, options, fillvalue={})] + funcs_targets = [as_numpy_func(_, opt) for _, opt in zip_longest(targets, targets_options, fillvalue={})] func = merger(*funcs) func_targets = targets_merger(*funcs_targets) @@ -220,9 +209,7 @@ def to_tfdataset2( import tensorflow as tf - func, func_targets = to_funcs( - features, targets, options, targets_options, merger, targets_merger - ) + func, func_targets = to_funcs(features, targets, options, targets_options, merger, targets_merger) indices = tf.data.Dataset.range(total_size) if shuffle_buffer_size: @@ -298,9 +285,7 @@ def to_tfdataset(self, *args, **kwargs): return to_tfdataset2(*args, **kwargs) - def to_tfdataset_( - self, *others, align_with=None, total_size=None, merger=default_merger - ): + def to_tfdataset_(self, *others, align_with=None, total_size=None, merger=default_merger): import tensorflow as tf if align_with is not None: diff --git a/climetlab/readers/grib/xarray.py b/src/climetlab/readers/grib/xarray.py similarity index 90% rename from climetlab/readers/grib/xarray.py rename to src/climetlab/readers/grib/xarray.py index 9a453034..44a082f8 100644 --- a/climetlab/readers/grib/xarray.py +++ b/src/climetlab/readers/grib/xarray.py @@ -12,7 +12,8 @@ import warnings from climetlab.utils.kwargs import Kwargs -from climetlab.utils.serialise import deserialise_state, serialise_state +from climetlab.utils.serialise import deserialise_state +from climetlab.utils.serialise import serialise_state LOG = logging.getLogger(__name__) @@ -65,20 +66,14 @@ def to_xarray(self, **kwargs): xarray_open_dataset_kwargs = {} if "xarray_open_mfdataset_kwargs" in kwargs: - warnings.warn( - "xarray_open_mfdataset_kwargs is deprecated, please use xarray_open_dataset_kwargs instead." 
- ) - kwargs["xarray_open_dataset_kwargs"] = kwargs.pop( - "xarray_open_mfdataset_kwargs" - ) + warnings.warn("xarray_open_mfdataset_kwargs is deprecated, please use xarray_open_dataset_kwargs instead.") + kwargs["xarray_open_dataset_kwargs"] = kwargs.pop("xarray_open_mfdataset_kwargs") user_xarray_open_dataset_kwargs = kwargs.get("xarray_open_dataset_kwargs", {}) # until ignore_keys is included into cfgrib, # it is implemented here directly - ignore_keys = user_xarray_open_dataset_kwargs.get("backend_kwargs", {}).pop( - "ignore_keys", [] - ) + ignore_keys = user_xarray_open_dataset_kwargs.get("backend_kwargs", {}).pop("ignore_keys", []) for key in ["backend_kwargs"]: xarray_open_dataset_kwargs[key] = Kwargs( diff --git a/climetlab/readers/matlab.py b/src/climetlab/readers/matlab.py similarity index 100% rename from climetlab/readers/matlab.py rename to src/climetlab/readers/matlab.py diff --git a/climetlab/readers/netcdf/__init__.py b/src/climetlab/readers/netcdf/__init__.py similarity index 100% rename from climetlab/readers/netcdf/__init__.py rename to src/climetlab/readers/netcdf/__init__.py diff --git a/climetlab/readers/netcdf/coords.py b/src/climetlab/readers/netcdf/coords.py similarity index 100% rename from climetlab/readers/netcdf/coords.py rename to src/climetlab/readers/netcdf/coords.py diff --git a/climetlab/readers/netcdf/dataset.py b/src/climetlab/readers/netcdf/dataset.py similarity index 100% rename from climetlab/readers/netcdf/dataset.py rename to src/climetlab/readers/netcdf/dataset.py diff --git a/climetlab/readers/netcdf/field.py b/src/climetlab/readers/netcdf/field.py similarity index 94% rename from climetlab/readers/netcdf/field.py rename to src/climetlab/readers/netcdf/field.py index cd9298e8..289358d7 100644 --- a/climetlab/readers/netcdf/field.py +++ b/src/climetlab/readers/netcdf/field.py @@ -16,7 +16,8 @@ from climetlab.indexing.fieldset import Field from climetlab.utils.bbox import BoundingBox -from .coords import LevelSlice, TimeSlice +from .coords import LevelSlice +from .coords import TimeSlice from .dataset import DataSet @@ -66,9 +67,7 @@ def __init__(self, owner, ds, variable, slices, non_dim_coords): def plot_map(self, backend): dimensions = dict((s.name, s.index) for s in self.slices) - backend.bounding_box( - north=self.north, south=self.south, west=self.west, east=self.east - ) + backend.bounding_box(north=self.north, south=self.south, west=self.west, east=self.east) backend.plot_netcdf(self.owner.path, self.variable, dimensions) @@ -94,11 +93,7 @@ def tidy(x): return {k: tidy(v) for k, v in x.items()} return x - return tidy( - self.owner.xr_dataset[ - self.owner.xr_dataset[self.variable].grid_mapping - ].attrs - ) + return tidy(self.owner.xr_dataset[self.owner.xr_dataset[self.variable].grid_mapping].attrs) # Compatibility to GRIb fields below diff --git a/climetlab/readers/netcdf/fieldset.py b/src/climetlab/readers/netcdf/fieldset.py similarity index 97% rename from climetlab/readers/netcdf/fieldset.py rename to src/climetlab/readers/netcdf/fieldset.py index 1527caf6..76551b14 100644 --- a/climetlab/readers/netcdf/fieldset.py +++ b/src/climetlab/readers/netcdf/fieldset.py @@ -10,12 +10,15 @@ from functools import cached_property from itertools import product -from climetlab.core.index import MaskIndex, MultiIndex +from climetlab.core.index import MaskIndex +from climetlab.core.index import MultiIndex from climetlab.indexing.fieldset import FieldSet from climetlab.utils.bbox import BoundingBox from climetlab.utils.dates import to_datetime -from 
.coords import LevelCoordinate, OtherCoordinate, TimeCoordinate +from .coords import LevelCoordinate +from .coords import OtherCoordinate +from .coords import TimeCoordinate from .dataset import DataSet from .field import NetCDFField diff --git a/climetlab/readers/numpy.py b/src/climetlab/readers/numpy.py similarity index 100% rename from climetlab/readers/numpy.py rename to src/climetlab/readers/numpy.py diff --git a/climetlab/readers/odb.py b/src/climetlab/readers/odb.py similarity index 100% rename from climetlab/readers/odb.py rename to src/climetlab/readers/odb.py diff --git a/climetlab/readers/tar.py b/src/climetlab/readers/tar.py similarity index 100% rename from climetlab/readers/tar.py rename to src/climetlab/readers/tar.py diff --git a/climetlab/readers/text.py b/src/climetlab/readers/text.py similarity index 96% rename from climetlab/readers/text.py rename to src/climetlab/readers/text.py index 6ddae789..e957c14a 100644 --- a/climetlab/readers/text.py +++ b/src/climetlab/readers/text.py @@ -9,7 +9,8 @@ from . import Reader -from .csv import CSVReader, is_csv +from .csv import CSVReader +from .csv import is_csv def is_text(path, prob_lines=1000, probe_size=4096): diff --git a/climetlab/readers/tfrecord.py b/src/climetlab/readers/tfrecord.py similarity index 100% rename from climetlab/readers/tfrecord.py rename to src/climetlab/readers/tfrecord.py diff --git a/climetlab/readers/unknown.py b/src/climetlab/readers/unknown.py similarity index 100% rename from climetlab/readers/unknown.py rename to src/climetlab/readers/unknown.py diff --git a/climetlab/readers/zip.py b/src/climetlab/readers/zip.py similarity index 100% rename from climetlab/readers/zip.py rename to src/climetlab/readers/zip.py diff --git a/climetlab/scripts/__init__.py b/src/climetlab/scripts/__init__.py similarity index 100% rename from climetlab/scripts/__init__.py rename to src/climetlab/scripts/__init__.py diff --git a/climetlab/scripts/availability.py b/src/climetlab/scripts/availability.py similarity index 100% rename from climetlab/scripts/availability.py rename to src/climetlab/scripts/availability.py diff --git a/climetlab/scripts/benchmark.py b/src/climetlab/scripts/benchmark.py similarity index 97% rename from climetlab/scripts/benchmark.py rename to src/climetlab/scripts/benchmark.py index 856a5028..cff63a63 100644 --- a/climetlab/scripts/benchmark.py +++ b/src/climetlab/scripts/benchmark.py @@ -16,7 +16,8 @@ import climetlab as cml from .benchmarks.indexed_url import benchmark as benchmark_indexed_url -from .tools import experimental, parse_args +from .tools import experimental +from .tools import parse_args home = os.path.expanduser("~") @@ -64,7 +65,7 @@ def _get_selection(self): class GribExp(Exp): def get_ds(self): - dic, isel_dic = self._get_selection() + dic, _ = self._get_selection() self.ds = cml.load_source("local", self.directory, dic) print(len(self.ds)) @@ -91,7 +92,7 @@ def get_ds(self): def get_values(self): print(self.ds) - dic, isel_dic = self._get_selection() + _, isel_dic = self._get_selection() ds = self.ds.isel(**isel_dic) for v in ds.variables: diff --git a/climetlab/scripts/benchmarks/__init__.py b/src/climetlab/scripts/benchmarks/__init__.py similarity index 100% rename from climetlab/scripts/benchmarks/__init__.py rename to src/climetlab/scripts/benchmarks/__init__.py diff --git a/climetlab/scripts/benchmarks/indexed_url.py b/src/climetlab/scripts/benchmarks/indexed_url.py similarity index 98% rename from climetlab/scripts/benchmarks/indexed_url.py rename to 
src/climetlab/scripts/benchmarks/indexed_url.py index 2e621620..5c2947aa 100644 --- a/climetlab/scripts/benchmarks/indexed_url.py +++ b/src/climetlab/scripts/benchmarks/indexed_url.py @@ -11,7 +11,8 @@ import time from climetlab import load_source -from climetlab.core.statistics import collect_statistics, retrieve_statistics +from climetlab.core.statistics import collect_statistics +from climetlab.core.statistics import retrieve_statistics CML_BASEURL_S3 = "https://object-store.os-api.cci1.ecmwf.int/climetlab" CML_BASEURL_CDS = "https://datastore.copernicus-climate.eu/climetlab" diff --git a/climetlab/scripts/cache.py b/src/climetlab/scripts/cache.py similarity index 97% rename from climetlab/scripts/cache.py rename to src/climetlab/scripts/cache.py index 24927d9c..754fb57d 100644 --- a/climetlab/scripts/cache.py +++ b/src/climetlab/scripts/cache.py @@ -16,10 +16,12 @@ from termcolor import colored from climetlab.core.settings import SETTINGS -from climetlab.utils import humanize, tqdm +from climetlab.utils import humanize +from climetlab.utils import tqdm from climetlab.utils.dates import to_datetime -from .tools import parse_args, print_table +from .tools import parse_args +from .tools import print_table LOG = logging.getLogger(__name__) @@ -155,9 +157,7 @@ class CacheCmd: @parse_args( json=dict(action="store_true", help="produce a JSON output"), all=dict(action="store_true"), - path=dict( - action="store_true", help="print the path of cache directory and exit" - ), + path=dict(action="store_true", help="print the path of cache directory and exit"), sort=dict( type=str, metavar="KEY", @@ -176,7 +176,8 @@ def do_cache(self, args): command. Examples: climetlab cache --all """ - from climetlab.core.caching import cache_directory, dump_cache_database + from climetlab.core.caching import cache_directory + from climetlab.core.caching import dump_cache_database if args.path: print(cache_directory()) diff --git a/climetlab/scripts/check.py b/src/climetlab/scripts/check.py similarity index 92% rename from climetlab/scripts/check.py rename to src/climetlab/scripts/check.py index 379dff3e..2e37b246 100644 --- a/climetlab/scripts/check.py +++ b/src/climetlab/scripts/check.py @@ -16,14 +16,18 @@ from termcolor import colored -from .tools import experimental, parse_args, print_table +from .tools import experimental +from .tools import parse_args +from .tools import print_table def version(module): try: - from importlib.metadata import PackageNotFoundError, version + from importlib.metadata import PackageNotFoundError + from importlib.metadata import version except Exception: - from importlib_metadata import PackageNotFoundError, version + from importlib_metadata import PackageNotFoundError + from importlib_metadata import version try: return version(module) @@ -92,9 +96,7 @@ def do_check(self, args): for name in ["eccodes", "magics"]: try: - print( - f" {name} from ecmwflibs: ok {versions[name]} ({ecmwflibs.find(name)})" - ) + print(f" {name} from ecmwflibs: ok {versions[name]} ({ecmwflibs.find(name)})") except Exception as e: # noqa: F841 print(f" {name} from ecmwflib: Warning: ecmwflibs cannot find {name}") @@ -119,17 +121,13 @@ def do_check(self, args): continue # if name == "eccodes": # more = f" (using .lib={lib.lib})" - print( - f" {name}: ok {lib.__version__} ({os.path.dirname(lib.__file__)}){more}" - ) + print(f" {name}: ok {lib.__version__} ({os.path.dirname(lib.__file__)}){more}") print("Checking optional dependencies...") for name in ["folium", "pdbufr", "pyodc"]: try: lib = 
import_module(name) - print( - f" {name}: ok {lib.__version__} ({os.path.dirname(lib.__file__)})" - ) + print(f" {name}: ok {lib.__version__} ({os.path.dirname(lib.__file__)})") except Exception as e: print(e) print(f" Warning: cannot import {name}. Limited capabilities.") @@ -237,9 +235,7 @@ def do_versions(self, args): if args.json: print(json.dumps(result, indent=4, sort_keys=True)) else: - COLORS = dict( - missing="red", damaged="red", builtin="blue", namespace="magenta" - ) + COLORS = dict(missing="red", damaged="red", builtin="blue", namespace="magenta") items = [] colours = [] for k, v in sorted(result.items()): diff --git a/climetlab/scripts/completion.py b/src/climetlab/scripts/completion.py similarity index 100% rename from climetlab/scripts/completion.py rename to src/climetlab/scripts/completion.py diff --git a/climetlab/scripts/grib.py b/src/climetlab/scripts/grib.py similarity index 94% rename from climetlab/scripts/grib.py rename to src/climetlab/scripts/grib.py index c9d6a4a2..a1f0a51d 100644 --- a/climetlab/scripts/grib.py +++ b/src/climetlab/scripts/grib.py @@ -13,10 +13,8 @@ import sys from climetlab.indexing.database.json import JsonStdoutDatabase -from climetlab.readers.grib.parsing import ( - GribIndexingDirectoryParserIterator, - _index_url, -) +from climetlab.readers.grib.parsing import GribIndexingDirectoryParserIterator +from climetlab.readers.grib.parsing import _index_url from .tools import parse_args @@ -85,9 +83,7 @@ def do_index_url(self, args): ), output=( "--output", - dict( - help="Custom location of the database file, will write absolute filenames in the database." - ), + dict(help="Custom location of the database file, will write absolute filenames in the database."), ), ) def do_index_directory(self, args): diff --git a/climetlab/scripts/grib_info.py b/src/climetlab/scripts/grib_info.py similarity index 100% rename from climetlab/scripts/grib_info.py rename to src/climetlab/scripts/grib_info.py diff --git a/climetlab/scripts/main.py b/src/climetlab/scripts/main.py similarity index 95% rename from climetlab/scripts/main.py rename to src/climetlab/scripts/main.py index 1795f9b8..865da728 100644 --- a/climetlab/scripts/main.py +++ b/src/climetlab/scripts/main.py @@ -105,9 +105,7 @@ def default(self, line): cmd = colored(line.split()[0], "yellow") help = colored("help", "yellow") - print( - f"Unknown command {cmd}. Type {help} for the list of known command names." - ) + print(f"Unknown command {cmd}. 
Type {help} for the list of known command names.") def onecmd(self, line): try: @@ -143,9 +141,7 @@ def main(): nargs=argparse.REMAINDER, ) - p.add_argument( - "-h", "--help", action="store_true", help="show this help message and exit" - ) + p.add_argument("-h", "--help", action="store_true", help="show this help message and exit") p.add_argument("-v", "--version", action="store_true", help="show version and exit") args = p.parse_args() if args.version: diff --git a/climetlab/scripts/settings.py b/src/climetlab/scripts/settings.py similarity index 98% rename from climetlab/scripts/settings.py rename to src/climetlab/scripts/settings.py index 5fe79571..abe9ef3d 100644 --- a/climetlab/scripts/settings.py +++ b/src/climetlab/scripts/settings.py @@ -12,7 +12,8 @@ from termcolor import colored -from .tools import parse_args, print_table +from .tools import parse_args +from .tools import print_table class SettingsCmd: diff --git a/climetlab/scripts/test_data.py b/src/climetlab/scripts/test_data.py similarity index 100% rename from climetlab/scripts/test_data.py rename to src/climetlab/scripts/test_data.py diff --git a/climetlab/scripts/tools.py b/src/climetlab/scripts/tools.py similarity index 93% rename from climetlab/scripts/tools.py rename to src/climetlab/scripts/tools.py index 8e0c8c64..1e4cb25f 100644 --- a/climetlab/scripts/tools.py +++ b/src/climetlab/scripts/tools.py @@ -32,9 +32,7 @@ def wrapper(func): add_help=False, ) # custom help to avoid exiting from climetlab cli. - p.add_argument( - "-h", "--help", action="store_true", help="show this help message and exit" - ) + p.add_argument("-h", "--help", action="store_true", help="show this help message and exit") for k, v in kwargs.items(): k = k.replace("_", "-") @@ -51,7 +49,7 @@ def wrapper(func): lst = [k] if lst[0] is None: lst[0] = k - if not lst[0] in [k, f"--{k}"]: + if lst[0] not in [k, f"--{k}"]: lst = [f"--{k}"] + lst p.add_argument(*lst, **dic) diff --git a/climetlab/sources/__init__.py b/src/climetlab/sources/__init__.py similarity index 97% rename from climetlab/sources/__init__.py rename to src/climetlab/sources/__init__.py index 30db784b..f261f39c 100644 --- a/climetlab/sources/__init__.py +++ b/src/climetlab/sources/__init__.py @@ -145,9 +145,7 @@ def __call__(self, name, *args, **kwargs): ) if name.startswith("http://") or name.startswith("https://"): - raise ValueError( - f'"{name}" is not a valid source name. Did you mean load_source("url", "{name}") ?' - ) + raise ValueError(f'"{name}" is not a valid source name. Did you mean load_source("url", "{name}") ?') klass = find_plugin(os.path.dirname(__file__), name, loader) diff --git a/climetlab/sources/ads.py b/src/climetlab/sources/ads.py similarity index 100% rename from climetlab/sources/ads.py rename to src/climetlab/sources/ads.py diff --git a/climetlab/sources/cds.py b/src/climetlab/sources/cds.py similarity index 100% rename from climetlab/sources/cds.py rename to src/climetlab/sources/cds.py diff --git a/climetlab/sources/climetlab_testing.py b/src/climetlab/sources/climetlab_testing.py similarity index 100% rename from climetlab/sources/climetlab_testing.py rename to src/climetlab/sources/climetlab_testing.py diff --git a/climetlab/sources/constants.py b/src/climetlab/sources/constants.py similarity index 98% rename from climetlab/sources/constants.py rename to src/climetlab/sources/constants.py index de45f0de..8ca2ea12 100644 --- a/climetlab/sources/constants.py +++ b/src/climetlab/sources/constants.py @@ -1,4 +1,4 @@ -# (C) Copyright 2020 ECMWF. 
+# (C) Copyright 2020 ECMWF. # # This software is licensed under the terms of the Apache Licence Version 2.0 # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. @@ -10,6 +10,7 @@ import datetime import itertools import logging +from warnings import warn import numpy as np @@ -151,9 +152,9 @@ def sin_local_time(self, date): return np.sin(radians) def insolation(self, date): - # warn( - # "The function `insolation` is deprecated, please use `cos_solar_zenith_angle` instead" - # ) + warn( + "The function `insolation` is deprecated, please use `cos_solar_zenith_angle` instead" + ) return self.cos_solar_zenith_angle(date) def toa_incident_solar_radiation(self, date): diff --git a/climetlab/sources/dummy.grib b/src/climetlab/sources/dummy.grib similarity index 100% rename from climetlab/sources/dummy.grib rename to src/climetlab/sources/dummy.grib diff --git a/climetlab/sources/ecmwf_api.py b/src/climetlab/sources/ecmwf_api.py similarity index 97% rename from climetlab/sources/ecmwf_api.py rename to src/climetlab/sources/ecmwf_api.py index 72dfb65c..02271f85 100644 --- a/climetlab/sources/ecmwf_api.py +++ b/src/climetlab/sources/ecmwf_api.py @@ -81,9 +81,7 @@ def retrieve(target, request): @normalize("param", "variable-list(mars)") @normalize("date", "date-list(%Y-%m-%d)") - @normalize( - "area", "maybe-bbox(list)" - ) # Bounding box checks fails with rotated grids + @normalize("area", "maybe-bbox(list)") # Bounding box checks fails with rotated grids def requests(self, **kwargs): def value_to_list(v): diff --git a/climetlab/sources/ecmwf_data_server.py b/src/climetlab/sources/ecmwf_data_server.py similarity index 100% rename from climetlab/sources/ecmwf_data_server.py rename to src/climetlab/sources/ecmwf_data_server.py diff --git a/climetlab/sources/ecmwf_data_server_base.py b/src/climetlab/sources/ecmwf_data_server_base.py similarity index 100% rename from climetlab/sources/ecmwf_data_server_base.py rename to src/climetlab/sources/ecmwf_data_server_base.py diff --git a/climetlab/sources/ecmwf_open_data.py b/src/climetlab/sources/ecmwf_open_data.py similarity index 100% rename from climetlab/sources/ecmwf_open_data.py rename to src/climetlab/sources/ecmwf_open_data.py diff --git a/climetlab/sources/ecmwf_research_experiment.py b/src/climetlab/sources/ecmwf_research_experiment.py similarity index 100% rename from climetlab/sources/ecmwf_research_experiment.py rename to src/climetlab/sources/ecmwf_research_experiment.py diff --git a/climetlab/sources/empty.py b/src/climetlab/sources/empty.py similarity index 100% rename from climetlab/sources/empty.py rename to src/climetlab/sources/empty.py diff --git a/climetlab/sources/era5_accumulations.py b/src/climetlab/sources/era5_accumulations.py similarity index 95% rename from climetlab/sources/era5_accumulations.py rename to src/climetlab/sources/era5_accumulations.py index bdb58079..a5ca4bee 100644 --- a/climetlab/sources/era5_accumulations.py +++ b/src/climetlab/sources/era5_accumulations.py @@ -149,21 +149,14 @@ def __init__(self, *args, **kwargs): requested.add(user_date + datetime.timedelta(hours=user_time)) - when = ( - user_date - + datetime.timedelta(hours=user_time) - - datetime.timedelta(hours=user_step) - ) + when = user_date + datetime.timedelta(hours=user_time) - datetime.timedelta(hours=user_step) add_step = 0 while when.hour not in (6, 18): when -= datetime.timedelta(hours=stepping) add_step += stepping - steps = tuple( - step + add_step - for step in range(stepping, user_step + 
stepping, stepping) - ) + steps = tuple(step + add_step for step in range(stepping, user_step + stepping, stepping)) for p in param: for n in number: diff --git a/climetlab/sources/fdb.py b/src/climetlab/sources/fdb.py similarity index 100% rename from climetlab/sources/fdb.py rename to src/climetlab/sources/fdb.py diff --git a/climetlab/sources/file.py b/src/climetlab/sources/file.py similarity index 100% rename from climetlab/sources/file.py rename to src/climetlab/sources/file.py diff --git a/climetlab/sources/file_pattern.py b/src/climetlab/sources/file_pattern.py similarity index 100% rename from climetlab/sources/file_pattern.py rename to src/climetlab/sources/file_pattern.py diff --git a/climetlab/sources/indexed.py b/src/climetlab/sources/indexed.py similarity index 100% rename from climetlab/sources/indexed.py rename to src/climetlab/sources/indexed.py diff --git a/climetlab/sources/indexed_directory.py b/src/climetlab/sources/indexed_directory.py similarity index 100% rename from climetlab/sources/indexed_directory.py rename to src/climetlab/sources/indexed_directory.py diff --git a/climetlab/sources/indexed_url.py b/src/climetlab/sources/indexed_url.py similarity index 89% rename from climetlab/sources/indexed_url.py rename to src/climetlab/sources/indexed_url.py index 36acb6c3..7a21b019 100644 --- a/climetlab/sources/indexed_url.py +++ b/src/climetlab/sources/indexed_url.py @@ -10,7 +10,8 @@ from climetlab.readers.grib.index.sql import FieldsetInFilesWithSqlIndex from climetlab.sources.indexed import IndexedSource -from climetlab.sources.indexed_urls import add_path, get_index_url +from climetlab.sources.indexed_urls import add_path +from climetlab.sources.indexed_urls import get_index_url class IndexedUrl(IndexedSource): diff --git a/climetlab/sources/indexed_url_with_json_index.py b/src/climetlab/sources/indexed_url_with_json_index.py similarity index 100% rename from climetlab/sources/indexed_url_with_json_index.py rename to src/climetlab/sources/indexed_url_with_json_index.py diff --git a/climetlab/sources/indexed_urls.py b/src/climetlab/sources/indexed_urls.py similarity index 94% rename from climetlab/sources/indexed_urls.py rename to src/climetlab/sources/indexed_urls.py index 9419410f..a114fc50 100644 --- a/climetlab/sources/indexed_urls.py +++ b/src/climetlab/sources/indexed_urls.py @@ -43,9 +43,7 @@ def __init__( **kwargs, ): if isinstance(pattern, PerUrlIndex): - warnings.warn( - "Passing a PerUrlIndex object is obsolete, please update your code." 
- ) + warnings.warn("Passing a PerUrlIndex object is obsolete, please update your code.") pattern = pattern.pattern print("PATTERN", pattern) diff --git a/climetlab/sources/loader.py b/src/climetlab/sources/loader.py similarity index 100% rename from climetlab/sources/loader.py rename to src/climetlab/sources/loader.py diff --git a/climetlab/sources/mars.py b/src/climetlab/sources/mars.py similarity index 96% rename from climetlab/sources/mars.py rename to src/climetlab/sources/mars.py index d3e1526a..4481b960 100644 --- a/climetlab/sources/mars.py +++ b/src/climetlab/sources/mars.py @@ -16,7 +16,8 @@ from climetlab.core.settings import SETTINGS from climetlab.core.temporary import temp_file -from .ecmwf_api import ECMWFApi, MARSAPIKeyPrompt +from .ecmwf_api import ECMWFApi +from .ecmwf_api import MARSAPIKeyPrompt LOG = logging.getLogger(__name__) diff --git a/climetlab/sources/metview.py b/src/climetlab/sources/metview.py similarity index 100% rename from climetlab/sources/metview.py rename to src/climetlab/sources/metview.py diff --git a/climetlab/sources/multi.py b/src/climetlab/sources/multi.py similarity index 98% rename from climetlab/sources/multi.py rename to src/climetlab/sources/multi.py index ebcd2a06..1c59147b 100644 --- a/climetlab/sources/multi.py +++ b/src/climetlab/sources/multi.py @@ -11,7 +11,8 @@ import logging from climetlab.core.thread import SoftThreadPool -from climetlab.mergers import make_merger, merge_by_class +from climetlab.mergers import make_merger +from climetlab.mergers import merge_by_class from climetlab.sources.empty import EmptySource from climetlab.utils import tqdm from climetlab.utils.bbox import BoundingBox diff --git a/climetlab/sources/multi_url.py b/src/climetlab/sources/multi_url.py similarity index 100% rename from climetlab/sources/multi_url.py rename to src/climetlab/sources/multi_url.py diff --git a/climetlab/sources/opendap.py b/src/climetlab/sources/opendap.py similarity index 100% rename from climetlab/sources/opendap.py rename to src/climetlab/sources/opendap.py diff --git a/climetlab/sources/oper_accumulations.py b/src/climetlab/sources/oper_accumulations.py similarity index 100% rename from climetlab/sources/oper_accumulations.py rename to src/climetlab/sources/oper_accumulations.py diff --git a/climetlab/sources/prompt.py b/src/climetlab/sources/prompt.py similarity index 97% rename from climetlab/sources/prompt.py rename to src/climetlab/sources/prompt.py index b4990994..38b4cade 100644 --- a/climetlab/sources/prompt.py +++ b/src/climetlab/sources/prompt.py @@ -16,7 +16,9 @@ import markdown -from climetlab.core.ipython import HTML, display, ipython_active +from climetlab.core.ipython import HTML +from climetlab.core.ipython import display +from climetlab.core.ipython import ipython_active LOG = logging.getLogger(__name__) diff --git a/climetlab/sources/sentinel_hub.py b/src/climetlab/sources/sentinel_hub.py similarity index 100% rename from climetlab/sources/sentinel_hub.py rename to src/climetlab/sources/sentinel_hub.py diff --git a/climetlab/sources/url.py b/src/climetlab/sources/url.py similarity index 100% rename from climetlab/sources/url.py rename to src/climetlab/sources/url.py diff --git a/climetlab/sources/url_pattern.py b/src/climetlab/sources/url_pattern.py similarity index 85% rename from climetlab/sources/url_pattern.py rename to src/climetlab/sources/url_pattern.py index 8ab81a83..dc9bc1f9 100644 --- a/climetlab/sources/url_pattern.py +++ b/src/climetlab/sources/url_pattern.py @@ -14,9 +14,7 @@ class 
UrlPattern(MultiUrl): def __init__(self, pattern, *args, filter=None, merger=None, force=False, **kwargs): urls = Pattern(pattern).substitute(*args, **kwargs) - super().__init__( - urls, *args, filter=filter, merger=merger, force=force, **kwargs - ) + super().__init__(urls, *args, filter=filter, merger=merger, force=force, **kwargs) source = UrlPattern diff --git a/climetlab/sources/virtual.py b/src/climetlab/sources/virtual.py similarity index 100% rename from climetlab/sources/virtual.py rename to src/climetlab/sources/virtual.py diff --git a/climetlab/sources/zarr.py b/src/climetlab/sources/zarr.py similarity index 96% rename from climetlab/sources/zarr.py rename to src/climetlab/sources/zarr.py index 0c679dc0..d1fc407a 100644 --- a/climetlab/sources/zarr.py +++ b/src/climetlab/sources/zarr.py @@ -65,10 +65,6 @@ def url_to_s3_store(url, user=None, password=None): def find_store(store): o = urlparse(store) - if "@" in o.netloc: - auth, server = o.netloc.split("@") - user, password = auth.split(":") - if o.scheme in ["http", "https", "s3"]: return url_to_s3_store(store) if os.path.exists(store): diff --git a/climetlab/sources/zarr_s3.py b/src/climetlab/sources/zarr_s3.py similarity index 100% rename from climetlab/sources/zarr_s3.py rename to src/climetlab/sources/zarr_s3.py diff --git a/climetlab/sources/zenodo.py b/src/climetlab/sources/zenodo.py similarity index 84% rename from climetlab/sources/zenodo.py rename to src/climetlab/sources/zenodo.py index c7753655..1bde4f86 100644 --- a/climetlab/sources/zenodo.py +++ b/src/climetlab/sources/zenodo.py @@ -38,15 +38,11 @@ def __init__( if file_key is None: if len(urls) != 1: - raise ValueError( - f"No `file_key` given, please specify on of {sorted(urls.keys())}" - ) + raise ValueError(f"No `file_key` given, please specify on of {sorted(urls.keys())}") file_key = list(urls.keys())[0] if file_key not in urls: - raise ValueError( - f"Invalid zenodo key '{file_key}', values are {sorted(urls.keys())}" - ) + raise ValueError(f"Invalid zenodo key '{file_key}', values are {sorted(urls.keys())}") LOG.debug("ZENODO record_keys %s", sorted(urls.keys())) diff --git a/climetlab/sphinxext/__init__.py b/src/climetlab/sphinxext/__init__.py similarity index 100% rename from climetlab/sphinxext/__init__.py rename to src/climetlab/sphinxext/__init__.py diff --git a/climetlab/sphinxext/command_output.py b/src/climetlab/sphinxext/command_output.py similarity index 94% rename from climetlab/sphinxext/command_output.py rename to src/climetlab/sphinxext/command_output.py index d9957702..03e6dd8e 100644 --- a/climetlab/sphinxext/command_output.py +++ b/src/climetlab/sphinxext/command_output.py @@ -28,9 +28,7 @@ def run(self): try: # Get current file - current_rst_file = self.state_machine.input_lines.source( - self.lineno - self.state_machine.input_offset - 1 - ) + current_rst_file = self.state_machine.input_lines.source(self.lineno - self.state_machine.input_offset - 1) os.chdir(os.path.dirname(current_rst_file)) diff --git a/climetlab/sphinxext/docs/_static/gallery/layers/default-background.svg b/src/climetlab/sphinxext/docs/_static/gallery/layers/default-background.svg similarity index 100% rename from climetlab/sphinxext/docs/_static/gallery/layers/default-background.svg rename to src/climetlab/sphinxext/docs/_static/gallery/layers/default-background.svg diff --git a/climetlab/sphinxext/docs/_static/gallery/layers/default-foreground.svg b/src/climetlab/sphinxext/docs/_static/gallery/layers/default-foreground.svg similarity index 100% rename from 
climetlab/sphinxext/docs/_static/gallery/layers/default-foreground.svg rename to src/climetlab/sphinxext/docs/_static/gallery/layers/default-foreground.svg diff --git a/climetlab/sphinxext/docs/_static/gallery/layers/land-sea.svg b/src/climetlab/sphinxext/docs/_static/gallery/layers/land-sea.svg similarity index 100% rename from climetlab/sphinxext/docs/_static/gallery/layers/land-sea.svg rename to src/climetlab/sphinxext/docs/_static/gallery/layers/land-sea.svg diff --git a/climetlab/sphinxext/docs/_static/gallery/projections/africa.svg b/src/climetlab/sphinxext/docs/_static/gallery/projections/africa.svg similarity index 100% rename from climetlab/sphinxext/docs/_static/gallery/projections/africa.svg rename to src/climetlab/sphinxext/docs/_static/gallery/projections/africa.svg diff --git a/climetlab/sphinxext/docs/_static/gallery/projections/asia.svg b/src/climetlab/sphinxext/docs/_static/gallery/projections/asia.svg similarity index 100% rename from climetlab/sphinxext/docs/_static/gallery/projections/asia.svg rename to src/climetlab/sphinxext/docs/_static/gallery/projections/asia.svg diff --git a/climetlab/sphinxext/docs/_static/gallery/projections/bonne.svg b/src/climetlab/sphinxext/docs/_static/gallery/projections/bonne.svg similarity index 100% rename from climetlab/sphinxext/docs/_static/gallery/projections/bonne.svg rename to src/climetlab/sphinxext/docs/_static/gallery/projections/bonne.svg diff --git a/climetlab/sphinxext/docs/_static/gallery/projections/collignon.svg b/src/climetlab/sphinxext/docs/_static/gallery/projections/collignon.svg similarity index 100% rename from climetlab/sphinxext/docs/_static/gallery/projections/collignon.svg rename to src/climetlab/sphinxext/docs/_static/gallery/projections/collignon.svg diff --git a/climetlab/sphinxext/docs/_static/gallery/projections/euro-atlantic.svg b/src/climetlab/sphinxext/docs/_static/gallery/projections/euro-atlantic.svg similarity index 100% rename from climetlab/sphinxext/docs/_static/gallery/projections/euro-atlantic.svg rename to src/climetlab/sphinxext/docs/_static/gallery/projections/euro-atlantic.svg diff --git a/climetlab/sphinxext/docs/_static/gallery/projections/europe-cylindrical.svg b/src/climetlab/sphinxext/docs/_static/gallery/projections/europe-cylindrical.svg similarity index 100% rename from climetlab/sphinxext/docs/_static/gallery/projections/europe-cylindrical.svg rename to src/climetlab/sphinxext/docs/_static/gallery/projections/europe-cylindrical.svg diff --git a/climetlab/sphinxext/docs/_static/gallery/projections/europe.svg b/src/climetlab/sphinxext/docs/_static/gallery/projections/europe.svg similarity index 100% rename from climetlab/sphinxext/docs/_static/gallery/projections/europe.svg rename to src/climetlab/sphinxext/docs/_static/gallery/projections/europe.svg diff --git a/climetlab/sphinxext/docs/_static/gallery/projections/global.svg b/src/climetlab/sphinxext/docs/_static/gallery/projections/global.svg similarity index 100% rename from climetlab/sphinxext/docs/_static/gallery/projections/global.svg rename to src/climetlab/sphinxext/docs/_static/gallery/projections/global.svg diff --git a/climetlab/sphinxext/docs/_static/gallery/projections/goode.svg b/src/climetlab/sphinxext/docs/_static/gallery/projections/goode.svg similarity index 100% rename from climetlab/sphinxext/docs/_static/gallery/projections/goode.svg rename to src/climetlab/sphinxext/docs/_static/gallery/projections/goode.svg diff --git a/climetlab/sphinxext/docs/_static/gallery/projections/mercator.svg 
b/src/climetlab/sphinxext/docs/_static/gallery/projections/mercator.svg similarity index 100% rename from climetlab/sphinxext/docs/_static/gallery/projections/mercator.svg rename to src/climetlab/sphinxext/docs/_static/gallery/projections/mercator.svg diff --git a/climetlab/sphinxext/docs/_static/gallery/projections/mollweide.svg b/src/climetlab/sphinxext/docs/_static/gallery/projections/mollweide.svg similarity index 100% rename from climetlab/sphinxext/docs/_static/gallery/projections/mollweide.svg rename to src/climetlab/sphinxext/docs/_static/gallery/projections/mollweide.svg diff --git a/climetlab/sphinxext/docs/_static/gallery/projections/north-america.svg b/src/climetlab/sphinxext/docs/_static/gallery/projections/north-america.svg similarity index 100% rename from climetlab/sphinxext/docs/_static/gallery/projections/north-america.svg rename to src/climetlab/sphinxext/docs/_static/gallery/projections/north-america.svg diff --git a/climetlab/sphinxext/docs/_static/gallery/projections/north-america1.svg b/src/climetlab/sphinxext/docs/_static/gallery/projections/north-america1.svg similarity index 100% rename from climetlab/sphinxext/docs/_static/gallery/projections/north-america1.svg rename to src/climetlab/sphinxext/docs/_static/gallery/projections/north-america1.svg diff --git a/climetlab/sphinxext/docs/_static/gallery/projections/north-atlantic.svg b/src/climetlab/sphinxext/docs/_static/gallery/projections/north-atlantic.svg similarity index 100% rename from climetlab/sphinxext/docs/_static/gallery/projections/north-atlantic.svg rename to src/climetlab/sphinxext/docs/_static/gallery/projections/north-atlantic.svg diff --git a/climetlab/sphinxext/docs/_static/gallery/styles/cyclone-track.svg b/src/climetlab/sphinxext/docs/_static/gallery/styles/cyclone-track.svg similarity index 100% rename from climetlab/sphinxext/docs/_static/gallery/styles/cyclone-track.svg rename to src/climetlab/sphinxext/docs/_static/gallery/styles/cyclone-track.svg diff --git a/climetlab/sphinxext/docs/_static/gallery/styles/default-style-fields.svg b/src/climetlab/sphinxext/docs/_static/gallery/styles/default-style-fields.svg similarity index 100% rename from climetlab/sphinxext/docs/_static/gallery/styles/default-style-fields.svg rename to src/climetlab/sphinxext/docs/_static/gallery/styles/default-style-fields.svg diff --git a/climetlab/sphinxext/docs/_static/gallery/styles/default-style-observations.svg b/src/climetlab/sphinxext/docs/_static/gallery/styles/default-style-observations.svg similarity index 100% rename from climetlab/sphinxext/docs/_static/gallery/styles/default-style-observations.svg rename to src/climetlab/sphinxext/docs/_static/gallery/styles/default-style-observations.svg diff --git a/climetlab/sphinxext/docs/_static/gallery/styles/land-sea-mask.svg b/src/climetlab/sphinxext/docs/_static/gallery/styles/land-sea-mask.svg similarity index 100% rename from climetlab/sphinxext/docs/_static/gallery/styles/land-sea-mask.svg rename to src/climetlab/sphinxext/docs/_static/gallery/styles/land-sea-mask.svg diff --git a/climetlab/sphinxext/docs/_static/gallery/styles/rainbow-markers.svg b/src/climetlab/sphinxext/docs/_static/gallery/styles/rainbow-markers.svg similarity index 100% rename from climetlab/sphinxext/docs/_static/gallery/styles/rainbow-markers.svg rename to src/climetlab/sphinxext/docs/_static/gallery/styles/rainbow-markers.svg diff --git a/climetlab/sphinxext/file_content.py b/src/climetlab/sphinxext/file_content.py similarity index 100% rename from climetlab/sphinxext/file_content.py 
rename to src/climetlab/sphinxext/file_content.py diff --git a/climetlab/sphinxext/generate_cmdline_help.py b/src/climetlab/sphinxext/generate_cmdline_help.py similarity index 93% rename from climetlab/sphinxext/generate_cmdline_help.py rename to src/climetlab/sphinxext/generate_cmdline_help.py index 2ed2299e..7eea40d3 100644 --- a/climetlab/sphinxext/generate_cmdline_help.py +++ b/src/climetlab/sphinxext/generate_cmdline_help.py @@ -49,12 +49,10 @@ def execute(*args): print() # Not garanteed to work with future versions of Python - func._argparser.formatter_class = ( - lambda prog: argparse.RawDescriptionHelpFormatter( - prog, - width=90, - max_help_position=10000, - ) + func._argparser.formatter_class = lambda prog: argparse.RawDescriptionHelpFormatter( + prog, + width=90, + max_help_position=10000, ) for n in func._argparser.format_help().split("\n"): diff --git a/climetlab/sphinxext/generate_gallery_rst.py b/src/climetlab/sphinxext/generate_gallery_rst.py similarity index 92% rename from climetlab/sphinxext/generate_gallery_rst.py rename to src/climetlab/sphinxext/generate_gallery_rst.py index fb63503f..e8bcea3f 100644 --- a/climetlab/sphinxext/generate_gallery_rst.py +++ b/src/climetlab/sphinxext/generate_gallery_rst.py @@ -62,14 +62,10 @@ def plot_style(name, path): sample = gallery["sample"] if "source" in sample: source = sample["source"] - data = get_source( - source["name"], source.get("to_pandas"), **source.get("args", {}) - ) + data = get_source(source["name"], source.get("to_pandas"), **source.get("args", {})) else: dataset = sample["dataset"] - data = get_dataset( - dataset["name"], dataset.get("to_pandas"), **dataset.get("args", {}) - ) + data = get_dataset(dataset["name"], dataset.get("to_pandas"), **dataset.get("args", {})) else: if "msymb" in yaml["magics"]: data = get_dataset( diff --git a/climetlab/sphinxext/generate_settings_rst.py b/src/climetlab/sphinxext/generate_settings_rst.py similarity index 100% rename from climetlab/sphinxext/generate_settings_rst.py rename to src/climetlab/sphinxext/generate_settings_rst.py diff --git a/climetlab/sphinxext/module_output.py b/src/climetlab/sphinxext/module_output.py similarity index 95% rename from climetlab/sphinxext/module_output.py rename to src/climetlab/sphinxext/module_output.py index 7e3015a7..fac6fb47 100644 --- a/climetlab/sphinxext/module_output.py +++ b/src/climetlab/sphinxext/module_output.py @@ -27,9 +27,7 @@ def run(self): save = sys.stdout try: # Get current file - current_rst_file = self.state_machine.input_lines.source( - self.lineno - self.state_machine.input_offset - 1 - ) + current_rst_file = self.state_machine.input_lines.source(self.lineno - self.state_machine.input_offset - 1) args = [x for x in self.content if x != ""][0].split(" ") name = args.pop(0) diff --git a/climetlab/testing.py b/src/climetlab/testing.py similarity index 93% rename from climetlab/testing.py rename to src/climetlab/testing.py index 56642eec..cb4c098b 100644 --- a/climetlab/testing.py +++ b/src/climetlab/testing.py @@ -17,7 +17,8 @@ from climetlab import load_source from climetlab.readers.text import TextReader from climetlab.sources.empty import EmptySource -from climetlab.utils import download_and_cache, module_installed +from climetlab.utils import download_and_cache +from climetlab.utils import module_installed LOG = logging.getLogger(__name__) @@ -39,7 +40,7 @@ def network_off(): def climetlab_file(*args): - top = os.path.dirname(os.path.dirname(__file__)) + top = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) 
return os.path.join(top, *args) @@ -133,9 +134,7 @@ def build_testdata(dir="testdata"): if os.path.exists(outpath): continue os.makedirs(os.path.dirname(outpath), exist_ok=True) - shutil.copyfile( - download_and_cache(TEST_DATA_URL_INPUT_GRIB + "/" + path), outpath - ) + shutil.copyfile(download_and_cache(TEST_DATA_URL_INPUT_GRIB + "/" + path), outpath) return dir diff --git a/climetlab/utils/__init__.py b/src/climetlab/utils/__init__.py similarity index 97% rename from climetlab/utils/__init__.py rename to src/climetlab/utils/__init__.py index bca7a7c2..2240dab1 100644 --- a/climetlab/utils/__init__.py +++ b/src/climetlab/utils/__init__.py @@ -143,9 +143,7 @@ def load_json_or_yaml(path): return json.load(f) if path.endswith(".yaml") or path.endswith(".yml"): return yaml.safe_load(f) - raise ValueError( - f"Cannot read file {path}. Need json or yaml with appropriate extension." - ) + raise ValueError(f"Cannot read file {path}. Need json or yaml with appropriate extension.") def progress_bar(*, total=None, iterable=None, initial=0, desc=None): diff --git a/climetlab/utils/availability.py b/src/climetlab/utils/availability.py similarity index 95% rename from climetlab/utils/availability.py rename to src/climetlab/utils/availability.py index 16ebc3d9..40c09660 100644 --- a/climetlab/utils/availability.py +++ b/src/climetlab/utils/availability.py @@ -18,9 +18,11 @@ import yaml from climetlab.utils import load_json_or_yaml -from climetlab.utils.factorise import Tree, factorise +from climetlab.utils.factorise import Tree +from climetlab.utils.factorise import factorise -from .humanize import dict_to_human, list_to_human +from .humanize import dict_to_human +from .humanize import list_to_human def _tidy_dict(query): @@ -178,9 +180,7 @@ def check(self, _kwargs=None, **kwargs): if not reasons: def iterate_request(r): - yield from ( - dict(zip(r.keys(), x)) for x in itertools.product(*r.values()) - ) + yield from (dict(zip(r.keys(), x)) for x in itertools.product(*r.values())) def build(x): # if isinstance(x, (list, tuple)): diff --git a/climetlab/utils/bbox.py b/src/climetlab/utils/bbox.py similarity index 90% rename from climetlab/utils/bbox.py rename to src/climetlab/utils/bbox.py index 25e8e300..3efefa4b 100644 --- a/climetlab/utils/bbox.py +++ b/src/climetlab/utils/bbox.py @@ -35,19 +35,13 @@ def __init__(self, *, north, west, south, east, check=True): self.east = _normalize(float(east), self.west) if self.north < self.south and check: - raise ValueError( - f"Invalid bounding box, north={self.north} < south={self.south}" - ) + raise ValueError(f"Invalid bounding box, north={self.north} < south={self.south}") if self.west > self.east and check: - raise ValueError( - f"Invalid bounding box, west={self.west} > east={self.east}" - ) + raise ValueError(f"Invalid bounding box, west={self.west} > east={self.east}") if self.east > self.west + 360 and check: - raise ValueError( - f"Invalid bounding box, east={self.east} > west={self.west}+360" - ) + raise ValueError(f"Invalid bounding box, east={self.east} > west={self.west}+360") def __repr__(self): return "BoundingBox(north=%g,west=%g,south=%g,east=%g)" % ( @@ -147,9 +141,7 @@ def merge(self, other): def add_margins(self, margins): if isinstance(margins, str) and margins[-1] == "%": margins = int(margins[:-1]) / 100.0 - margins = max( - (self.north - self.south) * margins, (self.east - self.west) * margins - ) + margins = max((self.north - self.south) * margins, (self.east - self.west) * margins) # TODO:check east/west margins_lat = margins diff --git 
a/climetlab/utils/config.py b/src/climetlab/utils/config.py similarity index 92% rename from climetlab/utils/config.py rename to src/climetlab/utils/config.py index d76962dd..5ea469f2 100644 --- a/climetlab/utils/config.py +++ b/src/climetlab/utils/config.py @@ -20,7 +20,8 @@ import numpy as np -from climetlab.core.order import build_remapping, normalize_order_by +from climetlab.core.order import build_remapping +from climetlab.core.order import normalize_order_by from climetlab.utils import load_json_or_yaml from climetlab.utils.humanize import seconds @@ -35,9 +36,7 @@ def __init__(self, *args, **kwargs): self[key] = DictObj(value) continue if isinstance(value, list): - self[key] = [ - DictObj(item) if isinstance(item, dict) else item for item in value - ] + self[key] = [DictObj(item) if isinstance(item, dict) else item for item in value] continue def __getattr__(self, attr): @@ -83,9 +82,7 @@ def get_datetimes(self): datetimes = new if datetimes != new: - raise ValueError( - "Mismatch in datetimes", previous_name, datetimes, i.name, new - ) + raise ValueError("Mismatch in datetimes", previous_name, datetimes, i.name, new) previous_name = i.name if datetimes is None: @@ -181,12 +178,8 @@ def __init__(self, dic): if "source_or_dataset" in self.config: # add $ to source_or_dataset for constants source. # climetlab will be refactored to remove this. - assert self.config["source_or_dataset"][0] != "$", self.config[ - "source_or_dataset" - ] - self.config["source_or_dataset"] = ( - "$" + self.config["source_or_dataset"] - ) + assert self.config["source_or_dataset"][0] != "$", self.config["source_or_dataset"] + self.config["source_or_dataset"] = "$" + self.config["source_or_dataset"] self.kwargs = self.config.get("kwargs", {}) self.inherit = self.config.get("inherit", []) @@ -228,18 +221,13 @@ def get_datetimes(self): assert isinstance(hdate, (list, tuple)), hdate if len(date) > 1 and len(hdate) > 1: raise NotImplementedError( - ( - f"Cannot have multiple dates in {self} " - "when using hindcast {date=}, {hdate=}" - ) + (f"Cannot have multiple dates in {self} " "when using hindcast {date=}, {hdate=}") ) date = hdate del hdate if len(step) > 1 and len(time) > 1: - raise NotImplementedError( - f"Cannot have multiple steps and multiple times in {self}" - ) + raise NotImplementedError(f"Cannot have multiple steps and multiple times in {self}") datetimes = set() for d, t, s in itertools.product(date, time, step): @@ -255,7 +243,8 @@ def get_datetimes(self): def do_load(self, partial=False): if not self._do_load or self._do_load[1] != partial: - from climetlab import load_dataset, load_source + from climetlab import load_dataset + from climetlab import load_source func = { None: load_source, @@ -385,11 +374,7 @@ def __init__(self, loops, input, output, partial=False): inputs = Inputs(input) self.output = output self.loops = [ - ( - c - if isinstance(c, Loop) and c.inputs == inputs - else Loop(c, inputs, parent=self, partial=partial) - ) + (c if isinstance(c, Loop) and c.inputs == inputs else Loop(c, inputs, parent=self, partial=partial)) for c in loops ] if not self.loops: @@ -450,9 +435,7 @@ def _info(self): ) coords = deepcopy(ref.coords) - assert ( - "valid_datetime" in coords - ), f"valid_datetime not found in coords {coords}" + assert "valid_datetime" in coords, f"valid_datetime not found in coords {coords}" coords["valid_datetime"] = self.get_datetimes() for info in infos: @@ -571,9 +554,7 @@ def __init__(self, dic, inputs, partial=False, parent=None): self.config.applies_to = [i.name for i in 
inputs] assert "applies_to" in self.config, self.config applies_to = self.config.pop("applies_to") - self.applies_to_inputs = Inputs( - [input for input in inputs if input.name in applies_to] - ) + self.applies_to_inputs = Inputs([input for input in inputs if input.name in applies_to]) for i in self.applies_to_inputs: i.process_inheritance(self.applies_to_inputs) @@ -619,9 +600,7 @@ def first(self): def _info(self): first_info = self.first._info coords = deepcopy(first_info.coords) - assert ( - "valid_datetime" in coords - ), f"valid_datetime not found in coords {coords}" + assert "valid_datetime" in coords, f"valid_datetime not found in coords {coords}" coords["valid_datetime"] = self.get_datetimes() return Info( first_field=first_info.first_field, @@ -713,9 +692,7 @@ def check(actual_dic, requested_dic): actual = list(actual) if requested == "ascending": - assert actual == sorted( - actual - ), f"Requested= {requested} Actual= {actual}" + assert actual == sorted(actual), f"Requested= {requested} Actual= {actual}" continue assert actual == requested, f"Requested= {requested} Actual= {actual}" @@ -734,9 +711,7 @@ def _info(self): coords = cube.user_coords variables = list(coords[list(coords.keys())[1]]) - return Info( - first_field, grid_points, resolution, coords, variables, data_request - ) + return Info(first_field, grid_points, resolution, coords, variables, data_request) def _get_data_request(self, data): date = None @@ -776,9 +751,7 @@ def sort(old_dic): params_steps = sort(params_steps) params_levels = sort(params_levels) - out = dict( - param_level=params_levels, param_step=params_steps, area=area, grid=grid - ) + out = dict(param_level=params_levels, param_step=params_steps, area=area, grid=grid) return out @@ -805,9 +778,7 @@ def _format_list(x): class Info: - def __init__( - self, first_field, grid_points, resolution, coords, variables, data_request - ): + def __init__(self, first_field, grid_points, resolution, coords, variables, data_request): assert len(set(variables)) == len(variables), ( "Duplicate variables", variables, @@ -867,9 +838,7 @@ def dict_to_str(cls, x): class NonePurpose(Purpose): def __call__(self, config): config.output.flatten_grid = config.output.get("flatten_grid", False) - config.output.ensemble_dimension = config.output.get( - "ensemble_dimension", False - ) + config.output.ensemble_dimension = config.output.get("ensemble_dimension", False) class AifsPurpose(Purpose): @@ -877,9 +846,7 @@ def __call__(self, config): def check_dict_value_and_set(dic, key, value): if key in dic: if dic[key] != value: - raise ValueError( - f"Cannot use {key}={dic[key]} with {self} purpose. Must use {value}." - ) + raise ValueError(f"Cannot use {key}={dic[key]} with {self} purpose. 
Must use {value}.") dic[key] = value def ensure_element_in_list(lst, elt, index): @@ -940,9 +907,7 @@ def __init__(self, config, *args, **kwargs): self.output.order_by = normalize_order_by(self.output.order_by) self.output.remapping = self.output.get("remapping", {}) - self.output.remapping = build_remapping( - self.output.remapping, patches={"number": {None: 0}} - ) + self.output.remapping = build_remapping(self.output.remapping, patches={"number": {None: 0}}) self.output.chunking = self.output.get("chunking", {}) self.output.dtype = self.output.get("dtype", "float32") @@ -962,9 +927,7 @@ def __init__(self, config, *args, **kwargs): if k == statistics_axis_name: statistics_axis = i - assert ( - statistics_axis >= 0 - ), f"{self.output.statistics} not in {list(self.output.order_by.keys())}" + assert statistics_axis >= 0, f"{self.output.statistics} not in {list(self.output.order_by.keys())}" self.statistics_names = self.output.order_by[statistics_axis_name] @@ -1008,9 +971,7 @@ def substitute(x, vars=None, ignore_missing=False): return [substitute(y, vars, ignore_missing=ignore_missing) for y in x] if isinstance(x, dict): - return { - k: substitute(v, vars, ignore_missing=ignore_missing) for k, v in x.items() - } + return {k: substitute(v, vars, ignore_missing=ignore_missing) for k, v in x.items()} if isinstance(x, str): if "$" not in x: @@ -1034,10 +995,7 @@ def substitute(x, vars=None, ignore_missing=False): function_name = match.group(1) params = [p.strip() for p in match.group(2).split(",")] - params = [ - substitute(p, vars, ignore_missing=ignore_missing) - for p in params - ] + params = [substitute(p, vars, ignore_missing=ignore_missing) for p in params] bit = FUNCTIONS[function_name](*params) @@ -1210,7 +1168,9 @@ def grouper_key(self, x): return { 1: lambda x: 0, # only one group None: lambda x: x, # one group per value - }[self.group_by](x) + }[ + self.group_by + ](x) def _expand_class(values): diff --git a/climetlab/utils/conventions.py b/src/climetlab/utils/conventions.py similarity index 100% rename from climetlab/utils/conventions.py rename to src/climetlab/utils/conventions.py diff --git a/climetlab/utils/dates.py b/src/climetlab/utils/dates.py similarity index 85% rename from climetlab/utils/dates.py rename to src/climetlab/utils/dates.py index 92ef72d2..5c390b4b 100644 --- a/climetlab/utils/dates.py +++ b/src/climetlab/utils/dates.py @@ -75,14 +75,8 @@ def to_datetime_list(datetimes): # noqa C901 return to_datetime_list([datetimes]) if isinstance(datetimes, (list, tuple)): - if ( - len(datetimes) == 3 - and isinstance(datetimes[1], str) - and datetimes[1].lower() == "to" - ): - return mars_like_date_list( - to_datetime(datetimes[0]), to_datetime(datetimes[2]), 1 - ) + if len(datetimes) == 3 and isinstance(datetimes[1], str) and datetimes[1].lower() == "to": + return mars_like_date_list(to_datetime(datetimes[0]), to_datetime(datetimes[2]), 1) if ( len(datetimes) == 5 @@ -91,9 +85,7 @@ def to_datetime_list(datetimes): # noqa C901 and datetimes[1].lower() == "to" and datetimes[3].lower() == "by" ): - return mars_like_date_list( - to_datetime(datetimes[0]), to_datetime(datetimes[2]), int(datetimes[4]) - ) + return mars_like_date_list(to_datetime(datetimes[0]), to_datetime(datetimes[2]), int(datetimes[4])) return [to_datetime(x) for x in datetimes] diff --git a/climetlab/utils/domains.py b/src/climetlab/utils/domains.py similarity index 96% rename from climetlab/utils/domains.py rename to src/climetlab/utils/domains.py index ae2db582..3eaf996e 100644 --- 
a/climetlab/utils/domains.py +++ b/src/climetlab/utils/domains.py @@ -71,9 +71,7 @@ def _update_areas(old, new, prefix=""): name = prefix + name assert name not in old, f"{name} already defined." if len(values) > 1: - LOG.debug( - f"Area {name} has multiple values {values}. Not supported by CliMetLab." - ) + LOG.debug(f"Area {name} has multiple values {values}. Not supported by CliMetLab.") continue old[name] = values[0] diff --git a/climetlab/utils/factorise.py b/src/climetlab/utils/factorise.py similarity index 98% rename from climetlab/utils/factorise.py rename to src/climetlab/utils/factorise.py index 6731fb42..97a01509 100644 --- a/climetlab/utils/factorise.py +++ b/src/climetlab/utils/factorise.py @@ -479,9 +479,7 @@ def parse_date(d): previous = parse_date(str(v[i - 1])) if current - previous != step: print(int(v[i])) - print( - f"expecting {previous + step} after {previous}, found {current}" - ) + print(f"expecting {previous + step} after {previous}, found {current}") raise ReturnNoneNone() return str(v[0]), str(v[-1]) @@ -596,9 +594,7 @@ def set_elem(self, c, r, v): return self.cols[self.colidx[c]].set_value(self.rowidx[r], v) def __repr__(self): - return repr( - [[self.cols[col].value(row) for row in self.rowidx] for col in self.colidx] - ) + return repr([[self.cols[col].value(row) for row in self.rowidx] for col in self.colidx]) def column(self, s, col): self.cols.append(Column(s, col)) diff --git a/climetlab/utils/html.py b/src/climetlab/utils/html.py similarity index 100% rename from climetlab/utils/html.py rename to src/climetlab/utils/html.py diff --git a/climetlab/utils/humanize.py b/src/climetlab/utils/humanize.py similarity index 99% rename from climetlab/utils/humanize.py rename to src/climetlab/utils/humanize.py index c81704e8..bde0764f 100644 --- a/climetlab/utils/humanize.py +++ b/src/climetlab/utils/humanize.py @@ -271,7 +271,7 @@ def string_distance(s, t): def did_you_mean(word, vocabulary): - distance, best = min((string_distance(word, w), w) for w in vocabulary) + _, best = min((string_distance(word, w), w) for w in vocabulary) # if distance < min(len(word), len(best)): return best diff --git a/climetlab/utils/kwargs.py b/src/climetlab/utils/kwargs.py similarity index 100% rename from climetlab/utils/kwargs.py rename to src/climetlab/utils/kwargs.py diff --git a/climetlab/utils/lazy.py b/src/climetlab/utils/lazy.py similarity index 100% rename from climetlab/utils/lazy.py rename to src/climetlab/utils/lazy.py diff --git a/climetlab/utils/parts.py b/src/climetlab/utils/parts.py similarity index 74% rename from climetlab/utils/parts.py rename to src/climetlab/utils/parts.py index 6383f07d..c1730fe9 100644 --- a/climetlab/utils/parts.py +++ b/src/climetlab/utils/parts.py @@ -20,11 +20,7 @@ def __init__(self, path, offset, length): self.length = length def __eq__(self, other): - return ( - self.path == other.path - and self.offset == other.offset - and self.length == other.length - ) + return self.path == other.path and self.offset == other.offset and self.length == other.length @classmethod def resolve(cls, parts, directory=None): @@ -33,14 +29,8 @@ def resolve(cls, parts, directory=None): paths[part.path].append(part) for path, bits in paths.items(): - if ( - path.startswith("http://") - or path.startswith("https://") - or path.startswith("ftp://") - ): - newpath = download_and_cache( - path, parts=[(p.offset, p.length) for p in bits] - ) + if path.startswith("http://") or path.startswith("https://") or path.startswith("ftp://"): + newpath = download_and_cache(path, 
parts=[(p.offset, p.length) for p in bits]) newoffset = 0 for p in bits: p.path = newpath diff --git a/climetlab/utils/patterns.py b/src/climetlab/utils/patterns.py similarity index 84% rename from climetlab/utils/patterns.py rename to src/climetlab/utils/patterns.py index 793f257e..016cacdb 100644 --- a/climetlab/utils/patterns.py +++ b/src/climetlab/utils/patterns.py @@ -29,11 +29,7 @@ def __init__(self, enum=""): def substitute(self, value, name): if self.enum and value not in self.enum: - raise ValueError( - "Invalid value '{}' for parameter '{}', expected one of {}".format( - value, name, self.enum - ) - ) + raise ValueError("Invalid value '{}' for parameter '{}', expected one of {}".format(value, name, self.enum)) return value @@ -43,11 +39,7 @@ def __init__(self, format="%d"): def substitute(self, value, name): if not isinstance(value, int): - raise ValueError( - "Invalid value '{}' for parameter '{}', expected an integer".format( - value, name - ) - ) + raise ValueError("Invalid value '{}' for parameter '{}', expected an integer".format(value, name)) return self.format % value @@ -57,11 +49,7 @@ def __init__(self, format="%g"): def substitute(self, value, name): if not isinstance(value, (int, float)): - raise ValueError( - "Invalid value '{}' for parameter '{}', expected a float".format( - value, name - ) - ) + raise ValueError("Invalid value '{}' for parameter '{}', expected a float".format(value, name)) return self.format % value @@ -80,11 +68,7 @@ def __init__(self, format="%s"): def substitute(self, value, name): if not isinstance(value, str): - raise ValueError( - "Invalid value '{}' for parameter '{}', expected a string".format( - value, name - ) - ) + raise ValueError("Invalid value '{}' for parameter '{}', expected a string".format(value, name)) return self.format % value @@ -194,9 +178,7 @@ def _substitute_many(self, params): seen = set() result = [] - for n in ( - dict(zip(params.keys(), x)) for x in itertools.product(*params.values()) - ): + for n in (dict(zip(params.keys(), x)) for x in itertools.product(*params.values())): m = self.substitute(n) if m not in seen: seen.add(m) diff --git a/climetlab/utils/serialise.py b/src/climetlab/utils/serialise.py similarity index 100% rename from climetlab/utils/serialise.py rename to src/climetlab/utils/serialise.py diff --git a/climetlab/vocabularies/__init__.py b/src/climetlab/vocabularies/__init__.py similarity index 100% rename from climetlab/vocabularies/__init__.py rename to src/climetlab/vocabularies/__init__.py diff --git a/climetlab/vocabularies/aliases.py b/src/climetlab/vocabularies/aliases.py similarity index 100% rename from climetlab/vocabularies/aliases.py rename to src/climetlab/vocabularies/aliases.py diff --git a/climetlab/vocabularies/cf.py b/src/climetlab/vocabularies/cf.py similarity index 100% rename from climetlab/vocabularies/cf.py rename to src/climetlab/vocabularies/cf.py diff --git a/climetlab/vocabularies/grib-paramid.csv b/src/climetlab/vocabularies/grib-paramid.csv similarity index 100% rename from climetlab/vocabularies/grib-paramid.csv rename to src/climetlab/vocabularies/grib-paramid.csv diff --git a/climetlab/vocabularies/grib.py b/src/climetlab/vocabularies/grib.py similarity index 100% rename from climetlab/vocabularies/grib.py rename to src/climetlab/vocabularies/grib.py diff --git a/climetlab/wrappers/__init__.py b/src/climetlab/wrappers/__init__.py similarity index 100% rename from climetlab/wrappers/__init__.py rename to src/climetlab/wrappers/__init__.py diff --git 
a/climetlab/wrappers/date.py b/src/climetlab/wrappers/date.py similarity index 100% rename from climetlab/wrappers/date.py rename to src/climetlab/wrappers/date.py diff --git a/climetlab/wrappers/integer.py b/src/climetlab/wrappers/integer.py similarity index 87% rename from climetlab/wrappers/integer.py rename to src/climetlab/wrappers/integer.py index 3bbce013..f5b78921 100644 --- a/climetlab/wrappers/integer.py +++ b/src/climetlab/wrappers/integer.py @@ -20,9 +20,7 @@ def to_datetime(self): date = datetime.datetime.utcnow() + datetime.timedelta(days=self.data) return datetime.datetime(date.year, date.month, date.day) else: - return datetime.datetime( - self.data // 10000, self.data % 10000 // 100, self.data % 100 - ) + return datetime.datetime(self.data // 10000, self.data % 10000 // 100, self.data % 100) def to_datetime_list(self): return [self.to_datetime()] diff --git a/climetlab/wrappers/ndarray.py b/src/climetlab/wrappers/ndarray.py similarity index 86% rename from climetlab/wrappers/ndarray.py rename to src/climetlab/wrappers/ndarray.py index 1582e5e1..01be9883 100644 --- a/climetlab/wrappers/ndarray.py +++ b/src/climetlab/wrappers/ndarray.py @@ -8,7 +8,8 @@ # import datetime -from climetlab.wrappers import Wrapper, get_wrapper +from climetlab.wrappers import Wrapper +from climetlab.wrappers import get_wrapper class NumpyArrayWrapper(Wrapper): @@ -36,10 +37,7 @@ def plot_map(self, backend): ) def to_datetime_list(self): - return [ - datetime.datetime.fromtimestamp(x * 1e-9, tz=datetime.timezone.utc) - for x in self.data.tolist() - ] + return [datetime.datetime.fromtimestamp(x * 1e-9, tz=datetime.timezone.utc) for x in self.data.tolist()] def wrapper(data, *args, **kwargs): diff --git a/climetlab/wrappers/none.py b/src/climetlab/wrappers/none.py similarity index 100% rename from climetlab/wrappers/none.py rename to src/climetlab/wrappers/none.py diff --git a/climetlab/wrappers/pandas.py b/src/climetlab/wrappers/pandas.py similarity index 100% rename from climetlab/wrappers/pandas.py rename to src/climetlab/wrappers/pandas.py diff --git a/climetlab/wrappers/string.py b/src/climetlab/wrappers/string.py similarity index 86% rename from climetlab/wrappers/string.py rename to src/climetlab/wrappers/string.py index 70d61b7c..ae518459 100644 --- a/climetlab/wrappers/string.py +++ b/src/climetlab/wrappers/string.py @@ -10,7 +10,8 @@ import datetime import re -from dateutil.parser import isoparse, parse +from dateutil.parser import isoparse +from dateutil.parser import parse from climetlab.wrappers import Wrapper @@ -58,9 +59,7 @@ def to_datetime_list(self): y = first_day.year m = first_day.month n_days = calendar.monthrange(y, m)[1] - return mars_like_date_list( - parse_date(f"{y}-{m:02}-01"), parse_date(f"{y}-{m:02}-{n_days}"), 1 - ) + return mars_like_date_list(parse_date(f"{y}-{m:02}-01"), parse_date(f"{y}-{m:02}-{n_days}"), 1) # MARS style lists bits = self.data.split("/") @@ -68,9 +67,7 @@ def to_datetime_list(self): return mars_like_date_list(parse_date(bits[0]), parse_date(bits[2]), 1) if len(bits) == 5 and bits[1].lower() == "to" and bits[3].lower() == "by": - return mars_like_date_list( - parse_date(bits[0]), parse_date(bits[2]), int(bits[4]) - ) + return mars_like_date_list(parse_date(bits[0]), parse_date(bits[2]), int(bits[4])) return [parse_date(d) for d in bits] diff --git a/climetlab/wrappers/tensor.py b/src/climetlab/wrappers/tensor.py similarity index 100% rename from climetlab/wrappers/tensor.py rename to src/climetlab/wrappers/tensor.py diff --git 
a/climetlab/wrappers/xarray.py b/src/climetlab/wrappers/xarray.py similarity index 97% rename from climetlab/wrappers/xarray.py rename to src/climetlab/wrappers/xarray.py index 3eb157f4..6e958a7a 100644 --- a/climetlab/wrappers/xarray.py +++ b/src/climetlab/wrappers/xarray.py @@ -81,9 +81,7 @@ def __init__(self, data): self.west = np.amin(self.longitude.data) def plot_map(self, backend): - backend.bounding_box( - north=self.north, south=self.south, west=self.west, east=self.east - ) + backend.bounding_box(north=self.north, south=self.south, west=self.west, east=self.east) dimension_settings = dict() diff --git a/tests/converters/test_metview.py b/tests/converters/test_metview.py index 9627e1fc..2403de7a 100644 --- a/tests/converters/test_metview.py +++ b/tests/converters/test_metview.py @@ -14,7 +14,8 @@ import pytest import climetlab as cml -from climetlab.testing import MISSING, climetlab_file +from climetlab.testing import MISSING +from climetlab.testing import climetlab_file LOG = logging.getLogger(__name__) diff --git a/tests/converters/test_tfdataset.py b/tests/converters/test_tfdataset.py index 112aba9b..2b56dc03 100644 --- a/tests/converters/test_tfdataset.py +++ b/tests/converters/test_tfdataset.py @@ -14,7 +14,9 @@ import pytest import climetlab as cml -from climetlab.testing import MISSING, NO_CDS, climetlab_file +from climetlab.testing import MISSING +from climetlab.testing import NO_CDS +from climetlab.testing import climetlab_file LOG = logging.getLogger(__name__) @@ -62,7 +64,9 @@ def test_tfdataset_grib_4(): @pytest.mark.skipif(NO_CDS, reason="No access to CDS") @pytest.mark.skipif(MISSING("tensorflow"), reason="Tensorflow not installed") def test_tfdataset_2(): - from tensorflow.keras.layers import Dense, Flatten, Input + from tensorflow.keras.layers import Dense + from tensorflow.keras.layers import Flatten + from tensorflow.keras.layers import Input from tensorflow.keras.models import Sequential ds = cml.load_dataset("high-low") diff --git a/tests/core/test_cache.py b/tests/core/test_cache.py index 672e3c3c..7394b705 100644 --- a/tests/core/test_cache.py +++ b/tests/core/test_cache.py @@ -16,14 +16,13 @@ import pytest -from climetlab import load_source, settings -from climetlab.core.caching import ( - cache_entries, - cache_file, - cache_size, - dump_cache_database, - purge_cache, -) +from climetlab import load_source +from climetlab import settings +from climetlab.core.caching import cache_entries +from climetlab.core.caching import cache_file +from climetlab.core.caching import cache_size +from climetlab.core.caching import dump_cache_database +from climetlab.core.caching import purge_cache from climetlab.core.temporary import temp_directory from climetlab.testing import TEST_DATA_URL @@ -146,9 +145,7 @@ def test_multiprocessing(): def func(val): # import climetlab as cml - source = cml.load_source( - "url", "https://github.com/ecmwf/climetlab/raw/main/docs/examples/test.grib" - ) + source = cml.load_source("url", "https://github.com/ecmwf/climetlab/raw/main/docs/examples/test.grib") source.to_xarray() return val + 1 diff --git a/tests/core/test_settings.py b/tests/core/test_settings.py index 81d33873..2ca146da 100644 --- a/tests/core/test_settings.py +++ b/tests/core/test_settings.py @@ -16,7 +16,8 @@ import climetlab.plotting from climetlab import settings -from climetlab.core.data import clear_cache, get_data_entry +from climetlab.core.data import clear_cache +from climetlab.core.data import get_data_entry from climetlab.core.plugins import directories from 
climetlab.core.temporary import temp_directory @@ -148,9 +149,7 @@ def test_temporary(): with settings.temporary("plotting-options", {"width": 100}): assert settings.get("styles-directories") == ["/c", "/d"] - assert settings.get("plotting-options") == {"width": 100}, settings.get( - "plotting-options" - ) + assert settings.get("plotting-options") == {"width": 100}, settings.get("plotting-options") settings.set("plotting-options", {"width": 200}) assert settings.get("plotting-options") == {"width": 200} settings.reset() @@ -215,9 +214,7 @@ def test_numbers(): assert settings.get("maximum-cache-size") == 1024 * 1024 * 1024 * 1024 settings.set("maximum-cache-size", "1P") - assert ( - settings.get("maximum-cache-size") == 1024 * 1024 * 1024 * 1024 * 1024 - ) + assert settings.get("maximum-cache-size") == 1024 * 1024 * 1024 * 1024 * 1024 settings.set("maximum-cache-size", None) assert settings.get("maximum-cache-size") is None diff --git a/tests/documentation/test_examples.py b/tests/documentation/test_examples.py index 099261bd..2d8e091c 100644 --- a/tests/documentation/test_examples.py +++ b/tests/documentation/test_examples.py @@ -13,7 +13,8 @@ import pytest -from climetlab.testing import IN_GITHUB, climetlab_file +from climetlab.testing import IN_GITHUB +from climetlab.testing import climetlab_file # import pytest diff --git a/tests/documentation/test_notebooks.py b/tests/documentation/test_notebooks.py index 87ff47fe..6726adb3 100644 --- a/tests/documentation/test_notebooks.py +++ b/tests/documentation/test_notebooks.py @@ -15,7 +15,9 @@ import pytest -from climetlab.testing import IN_GITHUB, MISSING, climetlab_file +from climetlab.testing import IN_GITHUB +from climetlab.testing import MISSING +from climetlab.testing import climetlab_file # See https://www.blog.pythonlibrary.org/2018/10/16/testing-jupyter-notebooks/ @@ -78,9 +80,7 @@ def notebooks_list(): reason="python package nbformat not installed", ) @pytest.mark.skipif(not IN_GITHUB, reason="Not on GITHUB") -@pytest.mark.skipif( - sys.platform == "win32", reason="Cannot execute notebooks on Windows" -) +@pytest.mark.skipif(sys.platform == "win32", reason="Cannot execute notebooks on Windows") @pytest.mark.parametrize("path", notebooks_list()) def test_notebook(path): import nbformat diff --git a/tests/example.ya_ml b/tests/example.ya_ml index 6b391d20..92be2ba1 100644 --- a/tests/example.ya_ml +++ b/tests/example.ya_ml @@ -2,4 +2,4 @@ title: - [string:with:colon, string with spaces, string] - - string:with:colon - string with spaces - - string \ No newline at end of file + - string diff --git a/tests/example.yaml b/tests/example.yaml index 6b391d20..9dfc208d 100644 --- a/tests/example.yaml +++ b/tests/example.yaml @@ -1,5 +1 @@ -title: -- [string:with:colon, string with spaces, string] -- - string:with:colon - - string with spaces - - string \ No newline at end of file +a: 2 diff --git a/tests/example.yml b/tests/example.yml index 6b391d20..9dfc208d 100644 --- a/tests/example.yml +++ b/tests/example.yml @@ -1,5 +1 @@ -title: -- [string:with:colon, string with spaces, string] -- - string:with:colon - - string with spaces - - string \ No newline at end of file +a: 2 diff --git a/tests/normalize/test_normalize_aliases.py b/tests/normalize/test_normalize_aliases.py index a2d05481..0175e156 100644 --- a/tests/normalize/test_normalize_aliases.py +++ b/tests/normalize/test_normalize_aliases.py @@ -11,7 +11,8 @@ import pytest -from climetlab.decorators import alias_argument, normalize +from climetlab.decorators import alias_argument +from 
climetlab.decorators import normalize def func_x(x): @@ -127,9 +128,7 @@ def test_aliases_grib_paramid_mutiple_false(typ): aliases_grib_paramid(tuple([])) -@pytest.mark.parametrize( - "typ,_131,_132", [(str, "131", "132"), (int, 131, 132), (float, 131.0, 132.0)] -) +@pytest.mark.parametrize("typ,_131,_132", [(str, "131", "132"), (int, 131, 132), (float, 131.0, 132.0)]) def test_aliases_grib_paramid_mutiple_true(typ, _131, _132): aliases_grib_paramid = normalize( "x", @@ -155,9 +154,7 @@ def test_aliases_grib_paramid_mutiple_true(typ, _131, _132): assert aliases_grib_paramid(tuple([])) == [] -@pytest.mark.parametrize( - "typ,_131,_132", [(str, "131", "132"), (int, 131, 132), (float, 131.0, 132.0)] -) +@pytest.mark.parametrize("typ,_131,_132", [(str, "131", "132"), (int, 131, 132), (float, 131.0, 132.0)]) def test_aliases_mutiple_none(typ, _131, _132): aliases_func = normalize( "x", diff --git a/tests/normalize/test_normalize_availability.py b/tests/normalize/test_normalize_availability.py index dce5bfa0..0e1afe4f 100644 --- a/tests/normalize/test_normalize_availability.py +++ b/tests/normalize/test_normalize_availability.py @@ -16,7 +16,8 @@ import pytest import yaml -from climetlab.decorators import availability, normalize +from climetlab.decorators import availability +from climetlab.decorators import normalize from climetlab.utils.availability import Availability @@ -43,9 +44,7 @@ def availability_s2s_as_list(): for k, v in config.items(): dic = dict(origin=k) - dic["date"] = [x.strftime("%Y-%m-%d") for x in pd.date_range(**v["alldates"])][ - :3 - ] + dic["date"] = [x.strftime("%Y-%m-%d") for x in pd.date_range(**v["alldates"])][:3] dic["number"] = list(range(1, v["number"] + 1)) dic["param"] = v["param"][:3] availability_list.append(dic) diff --git a/tests/normalize/test_normalize_date.py b/tests/normalize/test_normalize_date.py index c1e225bf..e8d4c7ce 100644 --- a/tests/normalize/test_normalize_date.py +++ b/tests/normalize/test_normalize_date.py @@ -46,9 +46,7 @@ def test_dates_formated_1(): @pytest.mark.skip(reason="Not implemented yet.") def test_enum_dates_formated(): - date_formated = normalize( - "d", values=["20010512", "20020512"], type="date", format="%Y.%m.%d" - )(f) + date_formated = normalize("d", values=["20010512", "20020512"], type="date", format="%Y.%m.%d")(f) assert date_formated("20200513") == "2020.05.13" diff --git a/tests/normalize/test_normalize_enum.py b/tests/normalize/test_normalize_enum.py index 057e65bb..cb565340 100644 --- a/tests/normalize/test_normalize_enum.py +++ b/tests/normalize/test_normalize_enum.py @@ -12,7 +12,8 @@ import pytest from climetlab import ALL -from climetlab.arguments.climetlab_types import EnumListType, EnumType +from climetlab.arguments.climetlab_types import EnumListType +from climetlab.arguments.climetlab_types import EnumType from climetlab.decorators import normalize @@ -99,19 +100,13 @@ def test_enum_float_1(): @pytest.mark.skip("Not implemented. 
Need to discuss what it would mean.") def test_enum_float_2(): - g = normalize("name", type=EnumListType([1, 0.5, 3]), format="%03f")( - name_no_default - ) + g = normalize("name", type=EnumListType([1, 0.5, 3]), format="%03f")(name_no_default) assert g(1) == ["1.000000"] - g = normalize("name", type=EnumListType([1, 0.5, 3]), format="%03f", multiple=True)( - name_no_default - ) + g = normalize("name", type=EnumListType([1, 0.5, 3]), format="%03f", multiple=True)(name_no_default) assert g(1) == ["1.000000"] - g = normalize( - "name", type=EnumListType([1, 0.5, 3]), format="%03f", multiple=False - )(name_no_default) + g = normalize("name", type=EnumListType([1, 0.5, 3]), format="%03f", multiple=False)(name_no_default) with pytest.raises(ValueError, match="Cannot .*"): assert g(1) == ["1.000000"] @@ -121,14 +116,10 @@ def test_enum_float_3(): g = normalize("name", type=EnumType([1, 0.5, 3]), format="%03f")(name_no_default) assert g(1) == "1.000000" - g = normalize("name", type=EnumType([1, 0.5, 3]), format="%03f", multiple=False)( - name_no_default - ) + g = normalize("name", type=EnumType([1, 0.5, 3]), format="%03f", multiple=False)(name_no_default) assert g(1) == "1.000000" - g = normalize("name", type=EnumType([1, 0.5, 3]), format="%03f", multiple=True)( - name_no_default - ) + g = normalize("name", type=EnumType([1, 0.5, 3]), format="%03f", multiple=True)(name_no_default) with pytest.raises(ValueError, match="Cannot .*"): assert g(1) == ["1.000000"] diff --git a/tests/normalize/test_normalize_errors.py b/tests/normalize/test_normalize_errors.py index 1b887b7f..75e706c1 100644 --- a/tests/normalize/test_normalize_errors.py +++ b/tests/normalize/test_normalize_errors.py @@ -11,7 +11,8 @@ import pytest -from climetlab.decorators import availability, normalize +from climetlab.decorators import availability +from climetlab.decorators import normalize def name_no_default(name): diff --git a/tests/normalize/test_transformers.py b/tests/normalize/test_transformers.py index 53057afe..c27c7c3f 100644 --- a/tests/normalize/test_transformers.py +++ b/tests/normalize/test_transformers.py @@ -13,22 +13,21 @@ import pytest -from climetlab.arguments.climetlab_types import ( - BoundingBoxType, - DateListType, - DateType, - EnumListType, - EnumType, - FloatListType, - FloatType, - IntListType, - IntType, - StrListType, - StrType, - VariableListType, - VariableType, -) -from climetlab.arguments.transformers import FormatTransformer, TypeTransformer +from climetlab.arguments.climetlab_types import BoundingBoxType +from climetlab.arguments.climetlab_types import DateListType +from climetlab.arguments.climetlab_types import DateType +from climetlab.arguments.climetlab_types import EnumListType +from climetlab.arguments.climetlab_types import EnumType +from climetlab.arguments.climetlab_types import FloatListType +from climetlab.arguments.climetlab_types import FloatType +from climetlab.arguments.climetlab_types import IntListType +from climetlab.arguments.climetlab_types import IntType +from climetlab.arguments.climetlab_types import StrListType +from climetlab.arguments.climetlab_types import StrType +from climetlab.arguments.climetlab_types import VariableListType +from climetlab.arguments.climetlab_types import VariableType +from climetlab.arguments.transformers import FormatTransformer +from climetlab.arguments.transformers import TypeTransformer from climetlab.utils.bbox import BoundingBox enum = ("a", "b", "c") @@ -54,21 +53,15 @@ def test_types(): assert TypeTransformer(None, 
type=FloatType).transform("3.14") == 3.14 assert TypeTransformer(None, type=FloatListType).transform(3.14) == [3.14] - assert TypeTransformer(None, type=DateType).transform( - 20000101 - ) == datetime.datetime(2000, 1, 1) + assert TypeTransformer(None, type=DateType).transform(20000101) == datetime.datetime(2000, 1, 1) - assert TypeTransformer(None, type=DateListType).transform( - "20000101/to/20000103" - ) == [ + assert TypeTransformer(None, type=DateListType).transform("20000101/to/20000103") == [ datetime.datetime(2000, 1, 1), datetime.datetime(2000, 1, 2), datetime.datetime(2000, 1, 3), ] - assert TypeTransformer(None, type=DateListType).transform( - (20000101, 20000102, 20000103) - ) == [ + assert TypeTransformer(None, type=DateListType).transform((20000101, 20000102, 20000103)) == [ datetime.datetime(2000, 1, 1), datetime.datetime(2000, 1, 2), datetime.datetime(2000, 1, 3), @@ -80,86 +73,56 @@ def test_types(): with pytest.raises(AssertionError): # FIXME: Not sure what this should be assert TypeTransformer(None, type=VariableListType("cf")).transform(42) == 0 - assert TypeTransformer(None, type=BoundingBoxType).transform( - (1, -1, -1, 1) - ) == BoundingBox(north=1, west=-1, south=-1, east=1) + assert TypeTransformer(None, type=BoundingBoxType).transform((1, -1, -1, 1)) == BoundingBox( + north=1, west=-1, south=-1, east=1 + ) def test_formats(): - assert ( - FormatTransformer(None, type=EnumType(enum), format="%4s").transform("a") - == " a" - ) + assert FormatTransformer(None, type=EnumType(enum), format="%4s").transform("a") == " a" - assert FormatTransformer(None, type=EnumListType(enum), format="%4s").transform( - ("a", "b") - ) == [" a", " b"] + assert FormatTransformer(None, type=EnumListType(enum), format="%4s").transform(("a", "b")) == [" a", " b"] assert FormatTransformer(None, type=StrType, format="%4s").transform("a") == " a" - assert FormatTransformer(None, type=StrListType, format="%4s").transform( - ("a", "b") - ) == [" a", " b"] + assert FormatTransformer(None, type=StrListType, format="%4s").transform(("a", "b")) == [" a", " b"] assert FormatTransformer(None, type=IntType, format="%04d").transform(42) == "0042" - assert FormatTransformer(None, type=IntListType, format="%04d").transform( - (42, 43) - ) == ["0042", "0043"] + assert FormatTransformer(None, type=IntListType, format="%04d").transform((42, 43)) == ["0042", "0043"] - assert ( - FormatTransformer(None, type=FloatType, format="%4s").transform("3.14") - == "3.14" - ) + assert FormatTransformer(None, type=FloatType, format="%4s").transform("3.14") == "3.14" - assert ( - FormatTransformer(None, type=FloatType, format="%.1f").transform(3.14) == "3.1" - ) + assert FormatTransformer(None, type=FloatType, format="%.1f").transform(3.14) == "3.1" - assert FormatTransformer(None, type=FloatListType, format="%.1f").transform( - (3.14, 2.72) - ) == ["3.1", "2.7"] + assert FormatTransformer(None, type=FloatListType, format="%.1f").transform((3.14, 2.72)) == ["3.1", "2.7"] - assert ( - FormatTransformer(None, type=DateType, format="%Y").transform( - datetime.datetime(2000, 1, 1) - ) - == "2000" - ) + assert FormatTransformer(None, type=DateType, format="%Y").transform(datetime.datetime(2000, 1, 1)) == "2000" assert FormatTransformer(None, type=DateListType, format="%d").transform( (datetime.datetime(2000, 1, 1), datetime.datetime(2000, 1, 2)) ) == ["01", "02"] with pytest.raises(Exception): # FIXME: Not sure what this should be - assert ( - FormatTransformer(None, type=VariableType, format="%4s").transform(42) == 0 - ) + 
assert FormatTransformer(None, type=VariableType, format="%4s").transform(42) == 0 with pytest.raises(Exception): # FIXME: Not sure what this should be - assert ( - FormatTransformer(None, type=VariableListType, format="%4s").transform(42) - == 0 - ) + assert FormatTransformer(None, type=VariableListType, format="%4s").transform(42) == 0 with pytest.raises(Exception): # FIXME: Not sure what this should be - assert FormatTransformer(None, type=BoundingBoxType, format="%4s").transform( - (1, -1, -1, 1) - ) == BoundingBox(north=1, west=-1, south=-1, east=1) + assert FormatTransformer(None, type=BoundingBoxType, format="%4s").transform((1, -1, -1, 1)) == BoundingBox( + north=1, west=-1, south=-1, east=1 + ) b1 = BoundingBox(north=90, west=-45, south=-90, east=45) - assert FormatTransformer(None, type=BoundingBoxType, format=tuple).transform( - b1 - ) == ( + assert FormatTransformer(None, type=BoundingBoxType, format=tuple).transform(b1) == ( 90.0, -45.0, -90.0, 45.0, ) - assert FormatTransformer(None, type=BoundingBoxType, format="dict").transform( - b1 - ) == { + assert FormatTransformer(None, type=BoundingBoxType, format="dict").transform(b1) == { "east": 45.0, "north": 90.0, "south": -90.0, diff --git a/tests/readers/test_csv_reader.py b/tests/readers/test_csv_reader.py index 45784e01..0d489473 100644 --- a/tests/readers/test_csv_reader.py +++ b/tests/readers/test_csv_reader.py @@ -14,7 +14,8 @@ import pytest import climetlab as cml -from climetlab.testing import MISSING, TEST_DATA_URL +from climetlab.testing import MISSING +from climetlab.testing import TEST_DATA_URL def test_csv_1(): diff --git a/tests/readers/test_grib_reader.py b/tests/readers/test_grib_reader.py index bef24c1b..f6615268 100644 --- a/tests/readers/test_grib_reader.py +++ b/tests/readers/test_grib_reader.py @@ -14,8 +14,10 @@ import pytest -from climetlab import load_source, plot_map -from climetlab.testing import NO_CDS, climetlab_file +from climetlab import load_source +from climetlab import plot_map +from climetlab.testing import NO_CDS +from climetlab.testing import climetlab_file def test_plot(): @@ -81,9 +83,7 @@ def test_datetime(): assert s.to_datetime() == datetime.datetime(2020, 5, 13, 12), s.to_datetime() - assert s.to_datetime_list() == [ - datetime.datetime(2020, 5, 13, 12) - ], s.to_datetime_list() + assert s.to_datetime_list() == [datetime.datetime(2020, 5, 13, 12)], s.to_datetime_list() s = load_source( "climetlab-testing", diff --git a/tests/readers/test_netcdf_reader.py b/tests/readers/test_netcdf_reader.py index a8ddbfa8..a601d7ce 100644 --- a/tests/readers/test_netcdf_reader.py +++ b/tests/readers/test_netcdf_reader.py @@ -14,9 +14,11 @@ import pytest import climetlab as cml -from climetlab import load_source, plot_map +from climetlab import load_source +from climetlab import plot_map from climetlab.readers.netcdf.field import NetCDFField -from climetlab.testing import NO_CDS, climetlab_file +from climetlab.testing import NO_CDS +from climetlab.testing import climetlab_file def test_netcdf(): @@ -129,9 +131,7 @@ def test_datetime(): assert s.to_datetime() == datetime.datetime(2020, 5, 13, 12), s.to_datetime() - assert s.to_datetime_list() == [ - datetime.datetime(2020, 5, 13, 12) - ], s.to_datetime_list() + assert s.to_datetime_list() == [datetime.datetime(2020, 5, 13, 12)], s.to_datetime_list() s = load_source( "climetlab-testing", diff --git a/tests/readers/test_tfdataset_reader.py b/tests/readers/test_tfdataset_reader.py index b2146fe3..29c9e7f1 100644 --- a/tests/readers/test_tfdataset_reader.py 
+++ b/tests/readers/test_tfdataset_reader.py @@ -12,7 +12,8 @@ import pytest from climetlab import load_source -from climetlab.testing import MISSING, TEST_DATA_URL +from climetlab.testing import MISSING +from climetlab.testing import TEST_DATA_URL @pytest.mark.skipif(MISSING("tensorflow"), reason="No tensorflow") diff --git a/tests/sources/test_constants.py b/tests/sources/test_constants.py index 4a2e00cd..fb8989fe 100644 --- a/tests/sources/test_constants.py +++ b/tests/sources/test_constants.py @@ -93,9 +93,7 @@ def test_constant_3(): date = sample[0].datetime() date_plus_6h = date + datetime.timedelta(hours=6) - a = load_source( - "constants", sample, date=date_plus_6h, param="cos_solar_zenith_angle" - ) + a = load_source("constants", sample, date=date_plus_6h, param="cos_solar_zenith_angle") b = load_source("constants", sample, date=date, param="cos_solar_zenith_angle+6h") assert np.all(a.to_numpy() == b.to_numpy()) assert a[0].metadata("param") == "cos_solar_zenith_angle" @@ -103,9 +101,7 @@ def test_constant_3(): date = sample[0].datetime() date_minus_30d = date + datetime.timedelta(days=-30) - a = load_source( - "constants", sample, date=date_minus_30d, param="cos_solar_zenith_angle" - ) + a = load_source("constants", sample, date=date_minus_30d, param="cos_solar_zenith_angle") b = load_source("constants", sample, date=date, param="cos_solar_zenith_angle-30d") assert np.all(a.to_numpy() == b.to_numpy()) assert a[0].metadata("param") == "cos_solar_zenith_angle" diff --git a/tests/sources/test_file.py b/tests/sources/test_file.py index ecd7aadc..179cbb26 100644 --- a/tests/sources/test_file.py +++ b/tests/sources/test_file.py @@ -48,9 +48,7 @@ def test_user_1(): LOG.exception("unlink(%s)", home_file) -@pytest.mark.skipif( - sys.platform == "win32", reason="Cannot (yet) use expandvars on Windows" -) +@pytest.mark.skipif(sys.platform == "win32", reason="Cannot (yet) use expandvars on Windows") def test_user_2(): s = load_source("file", climetlab_file("docs/examples/test.grib")) home_file = os.path.expanduser("~/.climetlab/test.grib") diff --git a/tests/sources/test_indexed_url.py b/tests/sources/test_indexed_url.py index d528fdd1..bab6e399 100644 --- a/tests/sources/test_indexed_url.py +++ b/tests/sources/test_indexed_url.py @@ -92,9 +92,7 @@ def test_cli_index_url(baseurl, capsys): @pytest.mark.download def test_cli_index_urls(baseurl, capsys): app = CliMetLabApp() - app.onecmd( - f"index_urls --baseurl {baseurl}/test-data/input/indexed-urls large_grib_1.grb large_grib_2.grb" - ) + app.onecmd(f"index_urls --baseurl {baseurl}/test-data/input/indexed-urls large_grib_1.grb large_grib_2.grb") out, err = capsys.readouterr() lines = out.split("\n") if "error" in err.lower(): diff --git a/tests/sources/test_indexed_urls.py b/tests/sources/test_indexed_urls.py index 378f08af..cdf09040 100644 --- a/tests/sources/test_indexed_urls.py +++ b/tests/sources/test_indexed_urls.py @@ -50,9 +50,7 @@ def check(source, i, ref): @pytest.mark.download @pytest.mark.parametrize("baseurl", CML_BASEURLS) def test_indexed_urls_deprecated(baseurl): - PER_URL_INDEX = PerUrlIndex( - baseurl + "/test-data/input/indexed-urls/large_grib_{n}.grb" - ) + PER_URL_INDEX = PerUrlIndex(baseurl + "/test-data/input/indexed-urls/large_grib_{n}.grb") source = load_source("indexed-urls", PER_URL_INDEX, REQUEST_1) check(source, 0, 49.86508481081071) ds = source.to_xarray() diff --git a/tests/sources/test_mars.py b/tests/sources/test_mars.py index 77152c0d..2343b48a 100644 --- a/tests/sources/test_mars.py +++ 
b/tests/sources/test_mars.py @@ -18,9 +18,7 @@ @pytest.mark.long_test @pytest.mark.download @pytest.mark.skipif(NO_MARS, reason="No access to MARS") -@pytest.mark.skip( - reason="No access to MARS for now (DHS move)" -) # TODO: remove this line +@pytest.mark.skip(reason="No access to MARS for now (DHS move)") # TODO: remove this line def test_mars_grib_1(): s = load_source( "mars", @@ -36,9 +34,7 @@ def test_mars_grib_1(): @pytest.mark.long_test @pytest.mark.download @pytest.mark.skipif(NO_MARS, reason="No access to MARS") -@pytest.mark.skip( - reason="No access to MARS for now (DHS move)" -) # TODO: remove this line +@pytest.mark.skip(reason="No access to MARS for now (DHS move)") # TODO: remove this line def test_mars_grib_2(): s = load_source( "mars", diff --git a/tests/sources/test_mirror.py b/tests/sources/test_mirror.py index 5358de9f..8e05eed0 100644 --- a/tests/sources/test_mirror.py +++ b/tests/sources/test_mirror.py @@ -16,12 +16,16 @@ import numpy as np import pytest -from climetlab import load_source, settings +from climetlab import load_source +from climetlab import settings from climetlab.core.caching import purge_cache from climetlab.core.temporary import temp_directory -from climetlab.mirrors import _reset_mirrors, get_active_mirrors +from climetlab.mirrors import _reset_mirrors +from climetlab.mirrors import get_active_mirrors from climetlab.mirrors.directory_mirror import DirectoryMirror -from climetlab.testing import NO_EOD, OfflineError, network_off +from climetlab.testing import NO_EOD +from climetlab.testing import OfflineError +from climetlab.testing import network_off def load(**kwargs): @@ -139,9 +143,7 @@ def test_mirror_url_source_env_var_1(mirror_dirs): assert len(get_active_mirrors()) == 1, get_active_mirrors() -@pytest.mark.skipif( - sys.platform == "win32", reason="Cannot unlink tmp directory on Windows" -) +@pytest.mark.skipif(sys.platform == "win32", reason="Cannot unlink tmp directory on Windows") @pytest.mark.download def test_mirror_url_source_env_var_2(mirror_dirs): mirror_dir, _ = mirror_dirs diff --git a/tests/sources/test_multi.py b/tests/sources/test_multi.py index 75fd99ad..5f6d796f 100644 --- a/tests/sources/test_multi.py +++ b/tests/sources/test_multi.py @@ -17,8 +17,10 @@ import pytest from climetlab import load_source -from climetlab.core.temporary import temp_directory, temp_file -from climetlab.testing import MISSING, TEST_DATA_URL +from climetlab.core.temporary import temp_directory +from climetlab.core.temporary import temp_file +from climetlab.testing import MISSING +from climetlab.testing import TEST_DATA_URL LOG = logging.getLogger(__name__) diff --git a/tests/sources/test_url.py b/tests/sources/test_url.py index 8deddac1..3876acd9 100644 --- a/tests/sources/test_url.py +++ b/tests/sources/test_url.py @@ -15,9 +15,13 @@ import pytest -from climetlab import load_source, settings +from climetlab import load_source +from climetlab import settings from climetlab.core.temporary import temp_directory -from climetlab.testing import IN_GITHUB, TEST_DATA_URL, climetlab_file, network_off +from climetlab.testing import IN_GITHUB +from climetlab.testing import TEST_DATA_URL +from climetlab.testing import climetlab_file +from climetlab.testing import network_off @pytest.mark.skipif( # TODO: fix diff --git a/tests/sources/test_zarr.py b/tests/sources/test_zarr.py index b034e9ca..a43917a4 100644 --- a/tests/sources/test_zarr.py +++ b/tests/sources/test_zarr.py @@ -13,7 +13,8 @@ import pytest from climetlab import load_source -from climetlab.testing 
import MISSING, TEST_DATA_URL +from climetlab.testing import MISSING +from climetlab.testing import TEST_DATA_URL NOT_S3_URL = f"{TEST_DATA_URL}/input" S3_URL = "https://object-store.os-api.cci1.ecmwf.int/climetlab/test-data/0.5/fixtures" @@ -48,20 +49,12 @@ def test_zarr_source_2(): assert len(ds.forecast_time) == 2 dates = to_datetime_list(ds.forecast_time) - assert dates[0].strftime("%Y-%m-%d") == datetime.datetime(2020, 1, 2).strftime( - "%Y-%m-%d" - ) - assert dates[1].strftime("%Y-%m-%d") == datetime.datetime(2020, 1, 9).strftime( - "%Y-%m-%d" - ) + assert dates[0].strftime("%Y-%m-%d") == datetime.datetime(2020, 1, 2).strftime("%Y-%m-%d") + assert dates[1].strftime("%Y-%m-%d") == datetime.datetime(2020, 1, 9).strftime("%Y-%m-%d") dates = to_datetime_list(ds.forecast_time.values) - assert dates[0].strftime("%Y-%m-%d") == datetime.datetime(2020, 1, 2).strftime( - "%Y-%m-%d" - ) - assert dates[1].strftime("%Y-%m-%d") == datetime.datetime(2020, 1, 9).strftime( - "%Y-%m-%d" - ) + assert dates[0].strftime("%Y-%m-%d") == datetime.datetime(2020, 1, 2).strftime("%Y-%m-%d") + assert dates[1].strftime("%Y-%m-%d") == datetime.datetime(2020, 1, 9).strftime("%Y-%m-%d") @pytest.mark.skipif(MISSING("zarr", "s3fs"), reason="Zarr or S3FS not installed") @@ -81,32 +74,16 @@ def test_zarr_source_3(): assert len(ds.forecast_time) == 8 dates = to_datetime_list(ds.forecast_time) - assert dates[0].strftime("%Y-%m-%d") == datetime.datetime(2000, 1, 2).strftime( - "%Y-%m-%d" - ) - assert dates[1].strftime("%Y-%m-%d") == datetime.datetime(2000, 1, 9).strftime( - "%Y-%m-%d" - ) - assert dates[2].strftime("%Y-%m-%d") == datetime.datetime(2001, 1, 2).strftime( - "%Y-%m-%d" - ) - assert dates[3].strftime("%Y-%m-%d") == datetime.datetime(2001, 1, 9).strftime( - "%Y-%m-%d" - ) + assert dates[0].strftime("%Y-%m-%d") == datetime.datetime(2000, 1, 2).strftime("%Y-%m-%d") + assert dates[1].strftime("%Y-%m-%d") == datetime.datetime(2000, 1, 9).strftime("%Y-%m-%d") + assert dates[2].strftime("%Y-%m-%d") == datetime.datetime(2001, 1, 2).strftime("%Y-%m-%d") + assert dates[3].strftime("%Y-%m-%d") == datetime.datetime(2001, 1, 9).strftime("%Y-%m-%d") dates = to_datetime_list(ds.forecast_time.values) - assert dates[0].strftime("%Y-%m-%d") == datetime.datetime(2000, 1, 2).strftime( - "%Y-%m-%d" - ) - assert dates[1].strftime("%Y-%m-%d") == datetime.datetime(2000, 1, 9).strftime( - "%Y-%m-%d" - ) - assert dates[2].strftime("%Y-%m-%d") == datetime.datetime(2001, 1, 2).strftime( - "%Y-%m-%d" - ) - assert dates[3].strftime("%Y-%m-%d") == datetime.datetime(2001, 1, 9).strftime( - "%Y-%m-%d" - ) + assert dates[0].strftime("%Y-%m-%d") == datetime.datetime(2000, 1, 2).strftime("%Y-%m-%d") + assert dates[1].strftime("%Y-%m-%d") == datetime.datetime(2000, 1, 9).strftime("%Y-%m-%d") + assert dates[2].strftime("%Y-%m-%d") == datetime.datetime(2001, 1, 2).strftime("%Y-%m-%d") + assert dates[3].strftime("%Y-%m-%d") == datetime.datetime(2001, 1, 9).strftime("%Y-%m-%d") @pytest.mark.skipif(MISSING("zarr", "s3fs"), reason="Zarr or S3FS not installed") diff --git a/tests/sources/test_zenodo.py b/tests/sources/test_zenodo.py index 48d1e56f..30b228a2 100755 --- a/tests/sources/test_zenodo.py +++ b/tests/sources/test_zenodo.py @@ -14,7 +14,8 @@ import climetlab as cml from climetlab.datasets import dataset_from_yaml -from climetlab.testing import IN_GITHUB, MISSING +from climetlab.testing import IN_GITHUB +from climetlab.testing import MISSING LOG = logging.getLogger(__name__) diff --git a/tests/test_annotations.py b/tests/test_annotations.py 
index 8dd8bdc2..34ba5085 100644 --- a/tests/test_annotations.py +++ b/tests/test_annotations.py @@ -14,7 +14,8 @@ import pytest import xarray as xr -from climetlab.core.metadata import annotate, annotation +from climetlab.core.metadata import annotate +from climetlab.core.metadata import annotation class Owner: diff --git a/tests/test_bbox.py b/tests/test_bbox.py index b8a326a8..add5d30b 100644 --- a/tests/test_bbox.py +++ b/tests/test_bbox.py @@ -121,9 +121,7 @@ def test_overlapping_bbox_1(): sets.append([one, one, one, two, one, two, one, three]) for i, s in enumerate(sets): merged = BoundingBox.multi_merge(s) - expected = BoundingBox( - east=offset + 60, west=offset + 10, north=90, south=-90 - ) + expected = BoundingBox(east=offset + 60, west=offset + 10, north=90, south=-90) assert merged.east == expected.east, ( i, merged.east, @@ -148,9 +146,7 @@ def test_overlapping_bbox_1(): sets.append([one, one, one, two, four, one, two, one, three]) for i, s in enumerate(sets): merged = BoundingBox.multi_merge(s) - expected = BoundingBox( - east=offset + 60, west=offset - 200, north=90, south=-90 - ) + expected = BoundingBox(east=offset + 60, west=offset - 200, north=90, south=-90) assert merged.east % 360 == expected.east % 360, ( i, offset, diff --git a/tests/test_cli.py b/tests/test_cli.py index 5b4317f7..b355d108 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -15,7 +15,8 @@ from climetlab import settings from climetlab.core.temporary import temp_env -from climetlab.scripts.main import CliMetLabApp, command_list +from climetlab.scripts.main import CliMetLabApp +from climetlab.scripts.main import command_list LOG = logging.getLogger(__name__) @@ -24,7 +25,7 @@ def test_cli_no_args(command, capsys): app = CliMetLabApp() app.onecmd(command) - out, err = capsys.readouterr() + out, _ = capsys.readouterr() assert not out.startswith("Unknown command"), out diff --git a/tests/test_datasets.py b/tests/test_datasets.py index c6f8f5ab..cb8ac4d6 100644 --- a/tests/test_datasets.py +++ b/tests/test_datasets.py @@ -12,7 +12,8 @@ import pytest import climetlab as cml -from climetlab import dataset, load_dataset +from climetlab import dataset +from climetlab import load_dataset from climetlab.testing import NO_CDS @@ -35,9 +36,7 @@ def test_era5_temperature(): @pytest.mark.download @pytest.mark.skipif(NO_CDS, reason="No access to CDS") def test_datetime(): - data = cml.load_dataset( - "era5-temperature", domain="france", period=(1980,), time=12 - ) + data = cml.load_dataset("era5-temperature", domain="france", period=(1980,), time=12) data["1980-12-09 12:00"] with pytest.raises(ValueError): data.sel(date="1980-12-09 12:00") diff --git a/tests/test_dates.py b/tests/test_dates.py index 41df7e95..1feb8169 100644 --- a/tests/test_dates.py +++ b/tests/test_dates.py @@ -15,8 +15,10 @@ import pytest from climetlab import load_source -from climetlab.testing import MISSING, climetlab_file -from climetlab.utils.dates import to_datetime, to_datetime_list +from climetlab.testing import MISSING +from climetlab.testing import climetlab_file +from climetlab.utils.dates import to_datetime +from climetlab.utils.dates import to_datetime_list def test_to_datetime_1(): @@ -43,9 +45,7 @@ def test_to_datetime_2(): assert to_datetime("1851-06-25 06:00:00") == datetime.datetime(1851, 6, 25, 6) assert to_datetime("1851-06-25T06:00:00") == datetime.datetime(1851, 6, 25, 6) - assert to_datetime("1851-06-25T06:00:00Z") == datetime.datetime( - 1851, 6, 25, 6, tzinfo=datetime.timezone.utc - ) + assert 
to_datetime("1851-06-25T06:00:00Z") == datetime.datetime(1851, 6, 25, 6, tzinfo=datetime.timezone.utc) assert to_datetime(-2) == to_datetime(0) - datetime.timedelta(days=2) @@ -89,12 +89,8 @@ def test_to_datetimes_list(): assert len(to_datetime_list((-10, "to", -1))) == 10 - assert to_datetime_list(datetime.datetime(2000, 1, 7)) == [ - datetime.datetime(2000, 1, 7) - ] - assert to_datetime_list([datetime.datetime(2000, 1, 7)]) == [ - datetime.datetime(2000, 1, 7) - ] + assert to_datetime_list(datetime.datetime(2000, 1, 7)) == [datetime.datetime(2000, 1, 7)] + assert to_datetime_list([datetime.datetime(2000, 1, 7)]) == [datetime.datetime(2000, 1, 7)] assert to_datetime_list( [ datetime.datetime(2000, 1, 4), @@ -115,9 +111,7 @@ def test_to_datetimes_list_grib(): def test_pandas_dates(): import pandas as pd - assert to_datetime_list( - pd.date_range(start="2020-01-02", end="2020-01-16", freq="w-thu") - ) == [ + assert to_datetime_list(pd.date_range(start="2020-01-02", end="2020-01-16", freq="w-thu")) == [ datetime.datetime(2020, 1, 2), datetime.datetime(2020, 1, 9), datetime.datetime(2020, 1, 16), @@ -142,9 +136,7 @@ def test_pandas_dates_2(): @pytest.mark.skipif(MISSING("zarr", "s3fs"), reason="zarr or s3fs not installed") def test_zarr_dates(): - S3_URL = ( - "https://object-store.os-api.cci1.ecmwf.int/climetlab/test-data/0.5/fixtures" - ) + S3_URL = "https://object-store.os-api.cci1.ecmwf.int/climetlab/test-data/0.5/fixtures" source = load_source( "zarr-s3", [ diff --git a/tests/test_domains.py b/tests/test_domains.py index 7585735b..540eae68 100644 --- a/tests/test_domains.py +++ b/tests/test_domains.py @@ -10,7 +10,8 @@ # -from climetlab.utils.domains import domain_to_area, domain_to_area_long_name +from climetlab.utils.domains import domain_to_area +from climetlab.utils.domains import domain_to_area_long_name def test_domains(): diff --git a/tests/test_download.py b/tests/test_download.py index a9b2915b..b22e76a7 100644 --- a/tests/test_download.py +++ b/tests/test_download.py @@ -26,9 +26,7 @@ def path_to_url(path): @pytest.mark.download @pytest.mark.small_download def test_download_1(): - url = "https://github.com/ecmwf/climetlab/raw/main/docs/examples/test.grib?_=%s" % ( - time.time(), - ) + url = "https://github.com/ecmwf/climetlab/raw/main/docs/examples/test.grib?_=%s" % (time.time(),) download_and_cache(url) diff --git a/tests/test_factorise.py b/tests/test_factorise.py index acf4a4f7..e4d70039 100644 --- a/tests/test_factorise.py +++ b/tests/test_factorise.py @@ -92,9 +92,7 @@ def test_factorise_3(): intervals=["date"], ) - assert _(c.to_list()) == _( - [{"date": ["1990-01-01/1990-01-02", "1990-01-04/1990-01-15"]}] - ) + assert _(c.to_list()) == _([{"date": ["1990-01-01/1990-01-02", "1990-01-04/1990-01-15"]}]) assert c.count() == 14 @@ -138,9 +136,7 @@ def test_factorise_4(): assert c.select(date="1990-01-01").count() == 2 assert c.select(date="1990-01-01").select(param="Z").count() == 1 - assert _(c.select(date="1990-01-01").to_list()) == _( - [{"date": ["1990-01-01"], "param": ["T", "Z"]}] - ) + assert _(c.select(date="1990-01-01").to_list()) == _([{"date": ["1990-01-01"], "param": ["T", "Z"]}]) assert _(c.select(date="1990-01-02/1990-01-05").to_list()) == _( [ diff --git a/tests/test_patterns.py b/tests/test_patterns.py index d1aa2e11..b7b55d67 100644 --- a/tests/test_patterns.py +++ b/tests/test_patterns.py @@ -17,16 +17,10 @@ def test_patterns(): assert p.names == ["date", "level", "param"], p.names - assert ( - p.substitute(dict(date="2000-01-01", param="2t", level=12)) - 
== "20000101-2t-12-012" - ) + assert p.substitute(dict(date="2000-01-01", param="2t", level=12)) == "20000101-2t-12-012" p = Pattern("{variable:enum(2t,tp)}.{type:enum(rt,hc)}.{date:date(%Y%m%d)}.grib") - assert ( - p.substitute(dict(date="2000-01-01", variable="tp", type="rt")) - == "tp.rt.20000101.grib" - ) + assert p.substitute(dict(date="2000-01-01", variable="tp", type="rt")) == "tp.rt.20000101.grib" assert p.substitute(dict(date="2000-01-01", variable=["tp", "2t"], type="rt")) == [ "tp.rt.20000101.grib", diff --git a/tests/test_thread.py b/tests/test_thread.py index e07b5fdf..e8b1c8a6 100644 --- a/tests/test_thread.py +++ b/tests/test_thread.py @@ -11,7 +11,8 @@ import time -from datetime import datetime, timedelta +from datetime import datetime +from datetime import timedelta from climetlab.core.thread import SoftThreadPool diff --git a/tests/test_unpack.py b/tests/test_unpack.py index f51c9356..9933c505 100644 --- a/tests/test_unpack.py +++ b/tests/test_unpack.py @@ -15,9 +15,11 @@ import pytest -from climetlab import load_source, settings +from climetlab import load_source +from climetlab import settings from climetlab.core.temporary import temp_directory -from climetlab.testing import OfflineError, network_off +from climetlab.testing import OfflineError +from climetlab.testing import network_off LOG = logging.getLogger(__name__) diff --git a/tests/test_utils.py b/tests/test_utils.py index 4a9f378b..831d5c88 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -14,17 +14,16 @@ import pytest -from climetlab.utils import load_json_or_yaml, string_to_args -from climetlab.utils.humanize import ( - as_bytes, - as_seconds, - as_timedelta, - bytes, - number, - plural, - seconds, - when, -) +from climetlab.utils import load_json_or_yaml +from climetlab.utils import string_to_args +from climetlab.utils.humanize import as_bytes +from climetlab.utils.humanize import as_seconds +from climetlab.utils.humanize import as_timedelta +from climetlab.utils.humanize import bytes +from climetlab.utils.humanize import number +from climetlab.utils.humanize import plural +from climetlab.utils.humanize import seconds +from climetlab.utils.humanize import when def test_load_yaml(): @@ -139,14 +138,8 @@ def test_humanize(): assert when(now - datetime.timedelta(days=3660), now) == "10 years ago" assert when(now, now - datetime.timedelta(days=3660)) == "in 10 years" - assert ( - when(now - datetime.timedelta(days=3660), now, short=False) - == "on Tuesday 30 September 2011" - ) - assert ( - when(now, now - datetime.timedelta(days=3660), short=False) - == "on Monday 6 October 2021" - ) + assert when(now - datetime.timedelta(days=3660), now, short=False) == "on Tuesday 30 September 2011" + assert when(now, now - datetime.timedelta(days=3660), short=False) == "on Monday 6 October 2021" def test_as_timedelta(): diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 0f1c276d..00000000 --- a/tox.ini +++ /dev/null @@ -1,10 +0,0 @@ -[flake8] -; ignore = E226,E302,E41 -max-line-length = 120 -; exclude = tests/* -; See https://black.readthedocs.io/en/stable/the_black_code_style.html -exclude = - dev/* - experiments - ?.py -extend-ignore = E203 \ No newline at end of file