From db03448cd67da7dc8d7ba065a961c0c31192749c Mon Sep 17 00:00:00 2001 From: euronion <42553970+euronion@users.noreply.github.com> Date: Tue, 11 Jan 2022 09:07:37 +0100 Subject: [PATCH 01/20] Create .yamllint --- .yamllint | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 .yamllint diff --git a/.yamllint b/.yamllint new file mode 100644 index 000000000..2ffc6f074 --- /dev/null +++ b/.yamllint @@ -0,0 +1,43 @@ +# SPDX-FileCopyrightText: : 2022 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: CC0-1.0 + +# Configuration is based on and overwrites 'default' configuration +extends: default + +# Files to apply yamllint to +yaml-files: + - '*.yaml' + - '*.yml' + - '.yamllint' + +rules: + braces: + # Do not allow flow mappings using curly braces "{" and "}" + forbid: true + brackets: + max-spaces-inside: 0 + max-spaces-inside-empty: 0 + comments: + require-starting-space: true + min-spaces-from-content: 1 + # Force correct indentation of comments + comments-indentation: {} + # Do not require a specific document start marker + document-start: disable + document-end: disable + empty-lines: + max: 3 + max-end: 0 + indentation: + spaces: consistent + # Consistent indent-sequences clash with environment.yaml exported by conda + indent-sequences: whatever + # Disallow duplicate keys in listings + key-duplicates: {} + line-length: + level: warning + max: 80 + new-line-at-end-of-file: enable + truthy: + check-keys: false # Disable truthy check hits on keys like "on": ... From 50eed2bb2266bef22086db228d0cb2f5fac42b5b Mon Sep 17 00:00:00 2001 From: euronion <42553970+euronion@users.noreply.github.com> Date: Tue, 11 Jan 2022 09:07:48 +0100 Subject: [PATCH 02/20] Create .pre-commit-config.yaml --- .pre-commit-config.yaml | 45 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 000000000..6b2c6a5c0 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,45 @@ +# SPDX-FileCopyrightText: : 2022 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: CC0-1.0 + +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.1.0 + hooks: + - id: check-merge-conflict + - id: check-symlinks + - id: check-yaml + - id: end-of-file-fixer + - id: fix-encoding-pragma + +# Formatting with "black" coding style +- repo: https://github.com/psf/black + rev: 21.12b0 + hooks: + # Format Python files + - id: black + # Format Jupyter Python notebooks + - id: black-jupyter + +# Use yamllint to check for valid YAML files and list syntax errors +- repo: https://github.com/adrienverge/yamllint.git + rev: v1.26.3 + hooks: + - id: yamllint + args: [--format, parsable, --strict, -c=.yamllint] + +# Use yamlfmt to for formatting YAML files, something yamllint doesn't support +- repo: https://github.com/jumanjihouse/pre-commit-hook-yamlfmt + rev: 0.1.0 + hooks: + - id: yamlfmt + args: [--mapping, '2', # Indentation by 2 spaces + --preserve-quotes, # Prevent parsing of e.g. 
Norway "NO" != False == NO + --implicit_start, # Add no start marker to YAML files + --width, '88'] # Line width from black + +# Check for FSFE REUSE compliance (licensing) +- repo: https://github.com/fsfe/reuse-tool + rev: v0.14.0 + hooks: + - id: reuse \ No newline at end of file From a0790069260fbe24168694d19de1aa684ad3f403 Mon Sep 17 00:00:00 2001 From: euronion <42553970+euronion@users.noreply.github.com> Date: Tue, 11 Jan 2022 09:28:45 +0100 Subject: [PATCH 03/20] Update .pre-commit-config.yaml Remove strictness for yamllint to prevent "Failed" precommit status on too long lines. Remove width restriction on yamlfmt to prevent breaking of long lists which are then indented "inconsistently" according to yamllint. (Arbitrary changes which seem reasonable at the moment; mainly caused by inconsistencies between pyaaml and ruamel.yaml). --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6b2c6a5c0..080261557 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -26,7 +26,7 @@ repos: rev: v1.26.3 hooks: - id: yamllint - args: [--format, parsable, --strict, -c=.yamllint] + args: [--format, parsable, -c=.yamllint] # Use yamlfmt to for formatting YAML files, something yamllint doesn't support - repo: https://github.com/jumanjihouse/pre-commit-hook-yamlfmt @@ -36,7 +36,7 @@ repos: args: [--mapping, '2', # Indentation by 2 spaces --preserve-quotes, # Prevent parsing of e.g. Norway "NO" != False == NO --implicit_start, # Add no start marker to YAML files - --width, '88'] # Line width from black + ] # Check for FSFE REUSE compliance (licensing) - repo: https://github.com/fsfe/reuse-tool From 60886bacb21fecf4219161ead469912bac4a34f9 Mon Sep 17 00:00:00 2001 From: euronion <42553970+euronion@users.noreply.github.com> Date: Tue, 11 Jan 2022 09:29:09 +0100 Subject: [PATCH 04/20] Update .yamllint Line length according to black (80 characters + <=10%). --- .yamllint | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.yamllint b/.yamllint index 2ffc6f074..ba40849e5 100644 --- a/.yamllint +++ b/.yamllint @@ -37,7 +37,7 @@ rules: key-duplicates: {} line-length: level: warning - max: 80 + max: 88 new-line-at-end-of-file: enable truthy: check-keys: false # Disable truthy check hits on keys like "on": ... From 28b898f00a4d15242f766982456db5a473d46122 Mon Sep 17 00:00:00 2001 From: euronion <42553970+euronion@users.noreply.github.com> Date: Tue, 11 Jan 2022 09:31:46 +0100 Subject: [PATCH 05/20] Remove symlinks checker (not needed) and don't tough LICENSES. --- .pre-commit-config.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 080261557..b741c6b32 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,12 +2,13 @@ # # SPDX-License-Identifier: CC0-1.0 +exclude: "^LICENSES" + repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.1.0 hooks: - id: check-merge-conflict - - id: check-symlinks - id: check-yaml - id: end-of-file-fixer - id: fix-encoding-pragma From 7202c3fec4e45fd6c22d645fa4daa6cb97cf236b Mon Sep 17 00:00:00 2001 From: euronion <42553970+euronion@users.noreply.github.com> Date: Tue, 11 Jan 2022 10:09:04 +0100 Subject: [PATCH 06/20] Move comments from mapping to top (prevent removal by pyamlfmt). 
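As the subject notes, the formatter drops comments that sit inside the flow-style args list, so the safe pattern is to hoist them onto their own lines above the hook entry. A minimal sketch of the two shapes (the inline comment wording here is illustrative; the actual change is in the diff below):

    # fragile: inline comments inside the flow sequence can be removed on reformat
    args: [--preserve-quotes,   # keep country codes like "NO" quoted
           --implicit_start]    # no document start marker

    # robust: comments hoisted above the entry survive reformatting
    # --preserve-quotes to prevent parsing of e.g. Norway "NO" != False == NO
    # --implicit_start to prevent adding a starting marker to YAML files
    args: [--preserve-quotes, --implicit_start]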
--- .pre-commit-config.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b741c6b32..19e15e2e2 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -30,6 +30,8 @@ repos: args: [--format, parsable, -c=.yamllint] # Use yamlfmt to for formatting YAML files, something yamllint doesn't support +# --preserve-quotes to prevent parsing of e.g. Norway "NO" != False == NO +# --implicit_start to prevent adding a starting marker to YAML files - repo: https://github.com/jumanjihouse/pre-commit-hook-yamlfmt rev: 0.1.0 hooks: From 9c51c8ed7dd7a5f4cee7d3157cefae80cf7d1add Mon Sep 17 00:00:00 2001 From: euronion <42553970+euronion@users.noreply.github.com> Date: Tue, 11 Jan 2022 10:17:37 +0100 Subject: [PATCH 07/20] Manually format .pre-commit-config.yaml for yamllint. --- .pre-commit-config.yaml | 50 ++++++++++++++++++++--------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 19e15e2e2..31de9ba1f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,29 +5,29 @@ exclude: "^LICENSES" repos: -- repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.1.0 - hooks: - - id: check-merge-conflict - - id: check-yaml - - id: end-of-file-fixer - - id: fix-encoding-pragma +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.1.0 + hooks: + - id: check-merge-conflict + - id: check-yaml + - id: end-of-file-fixer + - id: fix-encoding-pragma # Formatting with "black" coding style -- repo: https://github.com/psf/black - rev: 21.12b0 - hooks: - # Format Python files - - id: black - # Format Jupyter Python notebooks - - id: black-jupyter +- repo: https://github.com/psf/black + rev: 21.12b0 + hooks: + # Format Python files + - id: black + # Format Jupyter Python notebooks + - id: black-jupyter # Use yamllint to check for valid YAML files and list syntax errors - repo: https://github.com/adrienverge/yamllint.git rev: v1.26.3 hooks: - - id: yamllint - args: [--format, parsable, -c=.yamllint] + - id: yamllint + args: [--format, parsable, -c=.yamllint] # Use yamlfmt to for formatting YAML files, something yamllint doesn't support # --preserve-quotes to prevent parsing of e.g. Norway "NO" != False == NO @@ -35,14 +35,14 @@ repos: - repo: https://github.com/jumanjihouse/pre-commit-hook-yamlfmt rev: 0.1.0 hooks: - - id: yamlfmt - args: [--mapping, '2', # Indentation by 2 spaces - --preserve-quotes, # Prevent parsing of e.g. Norway "NO" != False == NO - --implicit_start, # Add no start marker to YAML files - ] + - id: yamlfmt + args: [--mapping, '2', # Indentation by 2 spaces + --preserve-quotes, # Prevent parsing of e.g. Norway "NO" != False == NO + --implicit_start, # Add no start marker to YAML files + ] # Check for FSFE REUSE compliance (licensing) -- repo: https://github.com/fsfe/reuse-tool - rev: v0.14.0 - hooks: - - id: reuse \ No newline at end of file +- repo: https://github.com/fsfe/reuse-tool + rev: v0.14.0 + hooks: + - id: reuse From 5f982f180cb837258b990773a2d074c3ae0586d3 Mon Sep 17 00:00:00 2001 From: euronion <42553970+euronion@users.noreply.github.com> Date: Tue, 11 Jan 2022 10:19:58 +0100 Subject: [PATCH 08/20] Add yamllint ignore lines for some options. Option "braces = disable" removes validity of "{}" of entries in yaml file, but those are necessary for the configuration-> manual ignore required. 
--- .yamllint | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/.yamllint b/.yamllint index ba40849e5..7f7484f7f 100644 --- a/.yamllint +++ b/.yamllint @@ -5,12 +5,6 @@ # Configuration is based on and overwrites 'default' configuration extends: default -# Files to apply yamllint to -yaml-files: - - '*.yaml' - - '*.yml' - - '.yamllint' - rules: braces: # Do not allow flow mappings using curly braces "{" and "}" @@ -22,6 +16,7 @@ rules: require-starting-space: true min-spaces-from-content: 1 # Force correct indentation of comments + # yamllint disable-line rule:braces comments-indentation: {} # Do not require a specific document start marker document-start: disable @@ -34,6 +29,7 @@ rules: # Consistent indent-sequences clash with environment.yaml exported by conda indent-sequences: whatever # Disallow duplicate keys in listings + # yamllint disable-line rule:braces key-duplicates: {} line-length: level: warning From 591231debfeb8b2f480e45a52db449ca8f64ba59 Mon Sep 17 00:00:00 2001 From: euronion <42553970+euronion@users.noreply.github.com> Date: Tue, 11 Jan 2022 10:49:39 +0100 Subject: [PATCH 09/20] Revert to defaults for mapping indentation. --- .pre-commit-config.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 31de9ba1f..a646c842a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -36,8 +36,7 @@ repos: rev: 0.1.0 hooks: - id: yamlfmt - args: [--mapping, '2', # Indentation by 2 spaces - --preserve-quotes, # Prevent parsing of e.g. Norway "NO" != False == NO + args: [--preserve-quotes, # Prevent parsing of e.g. Norway "NO" != False == NO --implicit_start, # Add no start marker to YAML files ] From 1c98a803f78eda8c657246093a2bb7c9dc18620b Mon Sep 17 00:00:00 2001 From: euronion <42553970+euronion@users.noreply.github.com> Date: Tue, 11 Jan 2022 11:14:37 +0100 Subject: [PATCH 10/20] Try different yaml formatter. --- .pre-commit-config.yaml | 68 ++++++++++++++++++++++------------------- 1 file changed, 37 insertions(+), 31 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a646c842a..a422159fb 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,43 +5,49 @@ exclude: "^LICENSES" repos: -- repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.1.0 - hooks: - - id: check-merge-conflict - - id: check-yaml - - id: end-of-file-fixer - - id: fix-encoding-pragma + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.1.0 + hooks: + - id: check-merge-conflict + - id: check-yaml + - id: end-of-file-fixer + - id: fix-encoding-pragma # Formatting with "black" coding style -- repo: https://github.com/psf/black - rev: 21.12b0 - hooks: + - repo: https://github.com/psf/black + rev: 21.12b0 + hooks: # Format Python files - - id: black + - id: black # Format Jupyter Python notebooks - - id: black-jupyter + - id: black-jupyter # Use yamllint to check for valid YAML files and list syntax errors -- repo: https://github.com/adrienverge/yamllint.git - rev: v1.26.3 - hooks: - - id: yamllint - args: [--format, parsable, -c=.yamllint] + - repo: https://github.com/adrienverge/yamllint.git + rev: v1.26.3 + hooks: + - id: yamllint + args: [--format, parsable, -c=.yamllint] -# Use yamlfmt to for formatting YAML files, something yamllint doesn't support -# --preserve-quotes to prevent parsing of e.g. 
Norway "NO" != False == NO -# --implicit_start to prevent adding a starting marker to YAML files -- repo: https://github.com/jumanjihouse/pre-commit-hook-yamlfmt - rev: 0.1.0 - hooks: - - id: yamlfmt - args: [--preserve-quotes, # Prevent parsing of e.g. Norway "NO" != False == NO - --implicit_start, # Add no start marker to YAML files - ] + - repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks + rev: v2.2.0 + hooks: + - id: pretty-format-yaml + args: [--autofix, --indent, '2', --preserve-quotes] + +# Disable yamlfmt for no, as it creates more problems than it solves: +# Creates invalid yaml files or files conflicting with yamllint rules +## Use yamlfmt to for formatting YAML files, something yamllint doesn't support +## --preserve-quotes to prevent parsing of e.g. Norway "NO" != False == NO +## --implicit_start to prevent adding a starting marker to YAML files +# - repo: https://github.com/jumanjihouse/pre-commit-hook-yamlfmt +# rev: 0.1.0 +# hooks: +# - id: yamlfmt +# args: [--preserve-quotes, --implicit_start] # Check for FSFE REUSE compliance (licensing) -- repo: https://github.com/fsfe/reuse-tool - rev: v0.14.0 - hooks: - - id: reuse + - repo: https://github.com/fsfe/reuse-tool + rev: v0.14.0 + hooks: + - id: reuse From d53893992cc362da8b1d27ce8500f7fc2b26a3dd Mon Sep 17 00:00:00 2001 From: euronion <42553970+euronion@users.noreply.github.com> Date: Tue, 11 Jan 2022 11:53:33 +0100 Subject: [PATCH 11/20] Move from yamlfmt to pretty-format-yaml for yaml formatting. --- .pre-commit-config.yaml | 52 ++++++++++++++++++++--------------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a422159fb..26cd7c676 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,35 +5,35 @@ exclude: "^LICENSES" repos: - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.1.0 - hooks: - - id: check-merge-conflict - - id: check-yaml - - id: end-of-file-fixer - - id: fix-encoding-pragma +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.1.0 + hooks: + - id: check-merge-conflict + - id: check-yaml + - id: end-of-file-fixer + - id: fix-encoding-pragma # Formatting with "black" coding style - - repo: https://github.com/psf/black - rev: 21.12b0 - hooks: +- repo: https://github.com/psf/black + rev: 21.12b0 + hooks: # Format Python files - - id: black + - id: black # Format Jupyter Python notebooks - - id: black-jupyter + - id: black-jupyter # Use yamllint to check for valid YAML files and list syntax errors - - repo: https://github.com/adrienverge/yamllint.git - rev: v1.26.3 - hooks: - - id: yamllint - args: [--format, parsable, -c=.yamllint] +- repo: https://github.com/adrienverge/yamllint.git + rev: v1.26.3 + hooks: + - id: yamllint + args: [--format, parsable, -c=.yamllint] - - repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks - rev: v2.2.0 - hooks: - - id: pretty-format-yaml - args: [--autofix, --indent, '2', --preserve-quotes] +- repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks + rev: v2.2.0 + hooks: + - id: pretty-format-yaml + args: [--autofix, --indent, '2', --preserve-quotes] # Disable yamlfmt for no, as it creates more problems than it solves: # Creates invalid yaml files or files conflicting with yamllint rules @@ -47,7 +47,7 @@ repos: # args: [--preserve-quotes, --implicit_start] # Check for FSFE REUSE compliance (licensing) - - repo: https://github.com/fsfe/reuse-tool - rev: v0.14.0 - hooks: - - id: reuse +- repo: 
https://github.com/fsfe/reuse-tool + rev: v0.14.0 + hooks: + - id: reuse From e539b36c714c2cb09e07b7144e4aca9fe4d914cd Mon Sep 17 00:00:00 2001 From: euronion <42553970+euronion@users.noreply.github.com> Date: Tue, 11 Jan 2022 11:59:53 +0100 Subject: [PATCH 12/20] Add automatic formatting of Snakefiles. --- .pre-commit-config.yaml | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 26cd7c676..b392bd3a0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -35,16 +35,11 @@ repos: - id: pretty-format-yaml args: [--autofix, --indent, '2', --preserve-quotes] -# Disable yamlfmt for no, as it creates more problems than it solves: -# Creates invalid yaml files or files conflicting with yamllint rules -## Use yamlfmt to for formatting YAML files, something yamllint doesn't support -## --preserve-quotes to prevent parsing of e.g. Norway "NO" != False == NO -## --implicit_start to prevent adding a starting marker to YAML files -# - repo: https://github.com/jumanjihouse/pre-commit-hook-yamlfmt -# rev: 0.1.0 -# hooks: -# - id: yamlfmt -# args: [--preserve-quotes, --implicit_start] +# Format Snakemake rule / workflow files +- repo: https://github.com/snakemake/snakefmt + rev: 0.4.4 + hooks: + - id: snakefmt # Check for FSFE REUSE compliance (licensing) - repo: https://github.com/fsfe/reuse-tool From a71a46b6e8fec62da4a68871e30a99ea1273ca9c Mon Sep 17 00:00:00 2001 From: euronion <42553970+euronion@users.noreply.github.com> Date: Tue, 11 Jan 2022 12:02:29 +0100 Subject: [PATCH 13/20] Apply all formatters from pre-commit. (Run `pre-commit run --all-files` ) --- .github/ISSUE_TEMPLATE/config.yml | 6 +- .github/workflows/ci.yaml | 14 +- .readthedocs.yml | 2 +- .syncignore-receive | 2 +- Snakefile | 588 ++++++++++++++-------- config.default.yaml | 112 ++--- config.tutorial.yaml | 100 ++-- data/parameter_corrections.yaml | 6 +- doc/_static/theme_overrides.css | 2 +- doc/conf.py | 184 +++---- doc/requirements.txt | 2 +- envs/environment.fixed.yaml | 608 +++++++++++------------ envs/environment.yaml | 88 ++-- scripts/_helpers.py | 204 +++++--- scripts/add_electricity.py | 746 +++++++++++++++++----------- scripts/add_extra_components.py | 242 +++++---- scripts/base_network.py | 482 +++++++++++------- scripts/build_bus_regions.py | 70 ++- scripts/build_cutout.py | 29 +- scripts/build_hydro_profile.py | 38 +- scripts/build_load_data.py | 160 +++--- scripts/build_natura_raster.py | 27 +- scripts/build_powerplants.py | 54 +- scripts/build_renewable_profiles.py | 143 +++--- scripts/build_shapes.py | 160 ++++-- scripts/cluster_network.py | 296 +++++++---- scripts/make_summary.py | 376 +++++++++----- scripts/plot_network.py | 388 ++++++++++----- scripts/plot_p_nom_max.py | 43 +- scripts/plot_summary.py | 102 ++-- scripts/prepare_links_p_nom.py | 51 +- scripts/prepare_network.py | 165 +++--- scripts/retrieve_databundle.py | 16 +- scripts/simplify_network.py | 309 ++++++++---- scripts/solve_network.py | 283 +++++++---- scripts/solve_operations_network.py | 97 ++-- test/config.test1.yaml | 98 ++-- 37 files changed, 3837 insertions(+), 2456 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 5b64d2427..d8c043821 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,5 +1,5 @@ blank_issues_enabled: false contact_links: - - name: PyPSA Mailing List - url: https://groups.google.com/forum/#!forum/pypsa - about: Please ask and answer 
general usage questions here. +- name: PyPSA Mailing List + url: https://groups.google.com/forum/#!forum/pypsa + about: Please ask and answer general usage questions here. diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index b0699d749..c8a605630 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -12,7 +12,7 @@ on: branches: - master schedule: - - cron: "0 5 * * TUE" + - cron: "0 5 * * TUE" jobs: build: @@ -35,18 +35,18 @@ jobs: - uses: actions/checkout@v2 - name: Setup Miniconda - uses: conda-incubator/setup-miniconda@v2.1.1 + uses: conda-incubator/setup-miniconda@v2.1.1 with: # checks out environment 'test' by default - mamba-version: "*" - channels: conda-forge,defaults - channel-priority: true + mamba-version: "*" + channels: conda-forge,defaults + channel-priority: true - - name: Install dependencies + - name: Install dependencies run: | echo -ne "url: ${CDSAPI_URL}\nkey: ${CDSAPI_TOKEN}\n" > ~/.cdsapirc echo -e " - glpk\n - ipopt<3.13.3" >> envs/environment.yaml mamba env update -f envs/environment.yaml --name test - + - name: Test snakemake workflow run: | conda list diff --git a/.readthedocs.yml b/.readthedocs.yml index d6b81a401..9108ff644 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -7,5 +7,5 @@ version: 2 python: version: 3.8 install: - - requirements: doc/requirements.txt + - requirements: doc/requirements.txt system_packages: true diff --git a/.syncignore-receive b/.syncignore-receive index 717245c35..8a9f7d103 100644 --- a/.syncignore-receive +++ b/.syncignore-receive @@ -16,4 +16,4 @@ notebooks doc cutouts data/bundle -*.nc \ No newline at end of file +*.nc diff --git a/Snakefile b/Snakefile index cb50e3bff..cbfe8c549 100644 --- a/Snakefile +++ b/Snakefile @@ -6,180 +6,262 @@ from os.path import normpath, exists from shutil import copyfile from snakemake.remote.HTTP import RemoteProvider as HTTPRemoteProvider + HTTP = HTTPRemoteProvider() if not exists("config.yaml"): copyfile("config.default.yaml", "config.yaml") + configfile: "config.yaml" -COSTS="data/costs.csv" -ATLITE_NPROCESSES = config['atlite'].get('nprocesses', 4) + +COSTS = "data/costs.csv" +ATLITE_NPROCESSES = config["atlite"].get("nprocesses", 4) wildcard_constraints: simpl="[a-zA-Z0-9]*|all", clusters="[0-9]+m?|all", ll="(v|c)([0-9\.]+|opt|all)|all", - opts="[-+a-zA-Z0-9\.]*" + opts="[-+a-zA-Z0-9\.]*", rule cluster_all_networks: - input: expand("networks/elec_s{simpl}_{clusters}.nc", **config['scenario']) + input: + expand("networks/elec_s{simpl}_{clusters}.nc", **config["scenario"]), rule extra_components_all_networks: - input: expand("networks/elec_s{simpl}_{clusters}_ec.nc", **config['scenario']) + input: + expand("networks/elec_s{simpl}_{clusters}_ec.nc", **config["scenario"]), rule prepare_all_networks: - input: expand("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", **config['scenario']) + input: + expand( + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + **config["scenario"] + ), rule solve_all_networks: - input: expand("results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", **config['scenario']) + input: + expand( + "results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + **config["scenario"] + ), + +if config["enable"].get("prepare_links_p_nom", False): -if config['enable'].get('prepare_links_p_nom', False): rule prepare_links_p_nom: - output: 'data/links_p_nom.csv' - log: 'logs/prepare_links_p_nom.log' + output: + "data/links_p_nom.csv", + log: + "logs/prepare_links_p_nom.log", threads: 1 - resources: mem=500 - 
script: 'scripts/prepare_links_p_nom.py' - - -datafiles = ['ch_cantons.csv', 'je-e-21.03.02.xls', - 'eez/World_EEZ_v8_2014.shp', 'EIA_hydro_generation_2000_2014.csv', - 'hydro_capacities.csv', 'naturalearth/ne_10m_admin_0_countries.shp', - 'NUTS_2013_60M_SH/data/NUTS_RG_60M_2013.shp', 'nama_10r_3popgdp.tsv.gz', - 'nama_10r_3gdp.tsv.gz', 'corine/g250_clc06_V18_5.tif'] - - -if not config.get('tutorial', False): + resources: + mem=500, + script: + "scripts/prepare_links_p_nom.py" + + +datafiles = [ + "ch_cantons.csv", + "je-e-21.03.02.xls", + "eez/World_EEZ_v8_2014.shp", + "EIA_hydro_generation_2000_2014.csv", + "hydro_capacities.csv", + "naturalearth/ne_10m_admin_0_countries.shp", + "NUTS_2013_60M_SH/data/NUTS_RG_60M_2013.shp", + "nama_10r_3popgdp.tsv.gz", + "nama_10r_3gdp.tsv.gz", + "corine/g250_clc06_V18_5.tif", +] + + +if not config.get("tutorial", False): datafiles.extend(["natura/Natura2000_end2015.shp", "GEBCO_2014_2D.nc"]) -if config['enable'].get('retrieve_databundle', True): +if config["enable"].get("retrieve_databundle", True): + rule retrieve_databundle: - output: expand('data/bundle/{file}', file=datafiles) - log: "logs/retrieve_databundle.log" - script: 'scripts/retrieve_databundle.py' + output: + expand("data/bundle/{file}", file=datafiles), + log: + "logs/retrieve_databundle.log", + script: + "scripts/retrieve_databundle.py" rule retrieve_load_data: - input: HTTP.remote("data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv", keep_local=True, static=True) - output: "data/load_raw.csv" - shell: "mv {input} {output}" + input: + HTTP.remote( + "data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv", + keep_local=True, + static=True, + ), + output: + "data/load_raw.csv", + shell: + "mv {input} {output}" rule build_load_data: - input: "data/load_raw.csv" - output: "resources/load.csv" - log: "logs/build_load_data.log" - script: 'scripts/build_load_data.py' - + input: + "data/load_raw.csv", + output: + "resources/load.csv", + log: + "logs/build_load_data.log", + script: + "scripts/build_load_data.py" + rule build_powerplants: input: base_network="networks/base.nc", - custom_powerplants="data/custom_powerplants.csv" - output: "resources/powerplants.csv" - log: "logs/build_powerplants.log" + custom_powerplants="data/custom_powerplants.csv", + output: + "resources/powerplants.csv", + log: + "logs/build_powerplants.log", threads: 1 - resources: mem=500 - script: "scripts/build_powerplants.py" + resources: + mem=500, + script: + "scripts/build_powerplants.py" rule base_network: input: - eg_buses='data/entsoegridkit/buses.csv', - eg_lines='data/entsoegridkit/lines.csv', - eg_links='data/entsoegridkit/links.csv', - eg_converters='data/entsoegridkit/converters.csv', - eg_transformers='data/entsoegridkit/transformers.csv', - parameter_corrections='data/parameter_corrections.yaml', - links_p_nom='data/links_p_nom.csv', - links_tyndp='data/links_tyndp.csv', - country_shapes='resources/country_shapes.geojson', - offshore_shapes='resources/offshore_shapes.geojson', - europe_shape='resources/europe_shape.geojson' - output: "networks/base.nc" - log: "logs/base_network.log" - benchmark: "benchmarks/base_network" + eg_buses="data/entsoegridkit/buses.csv", + eg_lines="data/entsoegridkit/lines.csv", + eg_links="data/entsoegridkit/links.csv", + eg_converters="data/entsoegridkit/converters.csv", + eg_transformers="data/entsoegridkit/transformers.csv", + parameter_corrections="data/parameter_corrections.yaml", + 
links_p_nom="data/links_p_nom.csv", + links_tyndp="data/links_tyndp.csv", + country_shapes="resources/country_shapes.geojson", + offshore_shapes="resources/offshore_shapes.geojson", + europe_shape="resources/europe_shape.geojson", + output: + "networks/base.nc", + log: + "logs/base_network.log", + benchmark: + "benchmarks/base_network" threads: 1 - resources: mem=500 - script: "scripts/base_network.py" + resources: + mem=500, + script: + "scripts/base_network.py" rule build_shapes: input: - naturalearth='data/bundle/naturalearth/ne_10m_admin_0_countries.shp', - eez='data/bundle/eez/World_EEZ_v8_2014.shp', - nuts3='data/bundle/NUTS_2013_60M_SH/data/NUTS_RG_60M_2013.shp', - nuts3pop='data/bundle/nama_10r_3popgdp.tsv.gz', - nuts3gdp='data/bundle/nama_10r_3gdp.tsv.gz', - ch_cantons='data/bundle/ch_cantons.csv', - ch_popgdp='data/bundle/je-e-21.03.02.xls' + naturalearth="data/bundle/naturalearth/ne_10m_admin_0_countries.shp", + eez="data/bundle/eez/World_EEZ_v8_2014.shp", + nuts3="data/bundle/NUTS_2013_60M_SH/data/NUTS_RG_60M_2013.shp", + nuts3pop="data/bundle/nama_10r_3popgdp.tsv.gz", + nuts3gdp="data/bundle/nama_10r_3gdp.tsv.gz", + ch_cantons="data/bundle/ch_cantons.csv", + ch_popgdp="data/bundle/je-e-21.03.02.xls", output: - country_shapes='resources/country_shapes.geojson', - offshore_shapes='resources/offshore_shapes.geojson', - europe_shape='resources/europe_shape.geojson', - nuts3_shapes='resources/nuts3_shapes.geojson' - log: "logs/build_shapes.log" + country_shapes="resources/country_shapes.geojson", + offshore_shapes="resources/offshore_shapes.geojson", + europe_shape="resources/europe_shape.geojson", + nuts3_shapes="resources/nuts3_shapes.geojson", + log: + "logs/build_shapes.log", threads: 1 - resources: mem=500 - script: "scripts/build_shapes.py" + resources: + mem=500, + script: + "scripts/build_shapes.py" rule build_bus_regions: input: - country_shapes='resources/country_shapes.geojson', - offshore_shapes='resources/offshore_shapes.geojson', - base_network="networks/base.nc" + country_shapes="resources/country_shapes.geojson", + offshore_shapes="resources/offshore_shapes.geojson", + base_network="networks/base.nc", output: regions_onshore="resources/regions_onshore.geojson", - regions_offshore="resources/regions_offshore.geojson" - log: "logs/build_bus_regions.log" + regions_offshore="resources/regions_offshore.geojson", + log: + "logs/build_bus_regions.log", threads: 1 - resources: mem=1000 - script: "scripts/build_bus_regions.py" + resources: + mem=1000, + script: + "scripts/build_bus_regions.py" + + +if config["enable"].get("build_cutout", False): -if config['enable'].get('build_cutout', False): rule build_cutout: - input: + input: regions_onshore="resources/regions_onshore.geojson", - regions_offshore="resources/regions_offshore.geojson" - output: "cutouts/{cutout}.nc" - log: "logs/build_cutout/{cutout}.log" - benchmark: "benchmarks/build_cutout_{cutout}" + regions_offshore="resources/regions_offshore.geojson", + output: + "cutouts/{cutout}.nc", + log: + "logs/build_cutout/{cutout}.log", + benchmark: + "benchmarks/build_cutout_{cutout}" threads: ATLITE_NPROCESSES - resources: mem=ATLITE_NPROCESSES * 1000 - script: "scripts/build_cutout.py" + resources: + mem=ATLITE_NPROCESSES * 1000, + script: + "scripts/build_cutout.py" + +if config["enable"].get("retrieve_cutout", True): -if config['enable'].get('retrieve_cutout', True): rule retrieve_cutout: - input: HTTP.remote("zenodo.org/record/4709858/files/{cutout}.nc", keep_local=True, static=True) - output: "cutouts/{cutout}.nc" - 
shell: "mv {input} {output}" + input: + HTTP.remote( + "zenodo.org/record/4709858/files/{cutout}.nc", + keep_local=True, + static=True, + ), + output: + "cutouts/{cutout}.nc", + shell: + "mv {input} {output}" + +if config["enable"].get("build_natura_raster", False): -if config['enable'].get('build_natura_raster', False): rule build_natura_raster: input: natura="data/bundle/natura/Natura2000_end2015.shp", - cutouts=expand("cutouts/{cutouts}.nc", **config['atlite']) - output: "resources/natura.tiff" - log: "logs/build_natura_raster.log" - script: "scripts/build_natura_raster.py" + cutouts=expand("cutouts/{cutouts}.nc", **config["atlite"]), + output: + "resources/natura.tiff", + log: + "logs/build_natura_raster.log", + script: + "scripts/build_natura_raster.py" -if config['enable'].get('retrieve_natura_raster', True): +if config["enable"].get("retrieve_natura_raster", True): + rule retrieve_natura_raster: - input: HTTP.remote("zenodo.org/record/4706686/files/natura.tiff", keep_local=True, static=True) - output: "resources/natura.tiff" - shell: "mv {input} {output}" + input: + HTTP.remote( + "zenodo.org/record/4706686/files/natura.tiff", + keep_local=True, + static=True, + ), + output: + "resources/natura.tiff", + shell: + "mv {input} {output}" rule build_renewable_profiles: @@ -187,175 +269,238 @@ rule build_renewable_profiles: base_network="networks/base.nc", corine="data/bundle/corine/g250_clc06_V18_5.tif", natura="resources/natura.tiff", - gebco=lambda w: ("data/bundle/GEBCO_2014_2D.nc" - if "max_depth" in config["renewable"][w.technology].keys() - else []), - country_shapes='resources/country_shapes.geojson', - offshore_shapes='resources/offshore_shapes.geojson', - regions=lambda w: ("resources/regions_onshore.geojson" - if w.technology in ('onwind', 'solar') - else "resources/regions_offshore.geojson"), - cutout=lambda w: "cutouts/" + config["renewable"][w.technology]['cutout'] + ".nc" - output: profile="resources/profile_{technology}.nc", - log: "logs/build_renewable_profile_{technology}.log" - benchmark: "benchmarks/build_renewable_profiles_{technology}" + gebco=lambda w: ( + "data/bundle/GEBCO_2014_2D.nc" + if "max_depth" in config["renewable"][w.technology].keys() + else [] + ), + country_shapes="resources/country_shapes.geojson", + offshore_shapes="resources/offshore_shapes.geojson", + regions=lambda w: ( + "resources/regions_onshore.geojson" + if w.technology in ("onwind", "solar") + else "resources/regions_offshore.geojson" + ), + cutout=lambda w: "cutouts/" + + config["renewable"][w.technology]["cutout"] + + ".nc", + output: + profile="resources/profile_{technology}.nc", + log: + "logs/build_renewable_profile_{technology}.log", + benchmark: + "benchmarks/build_renewable_profiles_{technology}" threads: ATLITE_NPROCESSES - resources: mem=ATLITE_NPROCESSES * 5000 - script: "scripts/build_renewable_profiles.py" + resources: + mem=ATLITE_NPROCESSES * 5000, + script: + "scripts/build_renewable_profiles.py" -if 'hydro' in config['renewable'].keys(): +if "hydro" in config["renewable"].keys(): + rule build_hydro_profile: input: - country_shapes='resources/country_shapes.geojson', - eia_hydro_generation='data/bundle/EIA_hydro_generation_2000_2014.csv', - cutout="cutouts/" + config["renewable"]['hydro']['cutout'] + ".nc" - output: 'resources/profile_hydro.nc' - log: "logs/build_hydro_profile.log" - resources: mem=5000 - script: 'scripts/build_hydro_profile.py' + country_shapes="resources/country_shapes.geojson", + eia_hydro_generation="data/bundle/EIA_hydro_generation_2000_2014.csv", + 
cutout="cutouts/" + config["renewable"]["hydro"]["cutout"] + ".nc", + output: + "resources/profile_hydro.nc", + log: + "logs/build_hydro_profile.log", + resources: + mem=5000, + script: + "scripts/build_hydro_profile.py" rule add_electricity: input: - base_network='networks/base.nc', + **{ + f"profile_{tech}": f"resources/profile_{tech}.nc" + for tech in config["renewable"] + }, + base_network="networks/base.nc", tech_costs=COSTS, regions="resources/regions_onshore.geojson", - powerplants='resources/powerplants.csv', - hydro_capacities='data/bundle/hydro_capacities.csv', - geth_hydro_capacities='data/geth2015_hydro_capacities.csv', - load='resources/load.csv', - nuts3_shapes='resources/nuts3_shapes.geojson', - **{f"profile_{tech}": f"resources/profile_{tech}.nc" - for tech in config['renewable']} - output: "networks/elec.nc" - log: "logs/add_electricity.log" - benchmark: "benchmarks/add_electricity" + powerplants="resources/powerplants.csv", + hydro_capacities="data/bundle/hydro_capacities.csv", + geth_hydro_capacities="data/geth2015_hydro_capacities.csv", + load="resources/load.csv", + nuts3_shapes="resources/nuts3_shapes.geojson", + output: + "networks/elec.nc", + log: + "logs/add_electricity.log", + benchmark: + "benchmarks/add_electricity" threads: 1 - resources: mem=3000 - script: "scripts/add_electricity.py" + resources: + mem=3000, + script: + "scripts/add_electricity.py" rule simplify_network: input: - network='networks/elec.nc', + network="networks/elec.nc", tech_costs=COSTS, regions_onshore="resources/regions_onshore.geojson", - regions_offshore="resources/regions_offshore.geojson" + regions_offshore="resources/regions_offshore.geojson", output: - network='networks/elec_s{simpl}.nc', + network="networks/elec_s{simpl}.nc", regions_onshore="resources/regions_onshore_elec_s{simpl}.geojson", regions_offshore="resources/regions_offshore_elec_s{simpl}.geojson", - busmap='resources/busmap_elec_s{simpl}.csv', - connection_costs='resources/connection_costs_s{simpl}.csv' - log: "logs/simplify_network/elec_s{simpl}.log" - benchmark: "benchmarks/simplify_network/elec_s{simpl}" + busmap="resources/busmap_elec_s{simpl}.csv", + connection_costs="resources/connection_costs_s{simpl}.csv", + log: + "logs/simplify_network/elec_s{simpl}.log", + benchmark: + "benchmarks/simplify_network/elec_s{simpl}" threads: 1 - resources: mem=4000 - script: "scripts/simplify_network.py" + resources: + mem=4000, + script: + "scripts/simplify_network.py" rule cluster_network: input: - network='networks/elec_s{simpl}.nc', + network="networks/elec_s{simpl}.nc", regions_onshore="resources/regions_onshore_elec_s{simpl}.geojson", regions_offshore="resources/regions_offshore_elec_s{simpl}.geojson", - busmap=ancient('resources/busmap_elec_s{simpl}.csv'), - custom_busmap=("data/custom_busmap_elec_s{simpl}_{clusters}.csv" - if config["enable"].get("custom_busmap", False) else []), - tech_costs=COSTS + busmap=ancient("resources/busmap_elec_s{simpl}.csv"), + custom_busmap=( + "data/custom_busmap_elec_s{simpl}_{clusters}.csv" + if config["enable"].get("custom_busmap", False) + else [] + ), + tech_costs=COSTS, output: - network='networks/elec_s{simpl}_{clusters}.nc', + network="networks/elec_s{simpl}_{clusters}.nc", regions_onshore="resources/regions_onshore_elec_s{simpl}_{clusters}.geojson", regions_offshore="resources/regions_offshore_elec_s{simpl}_{clusters}.geojson", busmap="resources/busmap_elec_s{simpl}_{clusters}.csv", - linemap="resources/linemap_elec_s{simpl}_{clusters}.csv" - log: 
"logs/cluster_network/elec_s{simpl}_{clusters}.log" - benchmark: "benchmarks/cluster_network/elec_s{simpl}_{clusters}" + linemap="resources/linemap_elec_s{simpl}_{clusters}.csv", + log: + "logs/cluster_network/elec_s{simpl}_{clusters}.log", + benchmark: + "benchmarks/cluster_network/elec_s{simpl}_{clusters}" threads: 1 - resources: mem=3000 - script: "scripts/cluster_network.py" + resources: + mem=3000, + script: + "scripts/cluster_network.py" rule add_extra_components: input: - network='networks/elec_s{simpl}_{clusters}.nc', + network="networks/elec_s{simpl}_{clusters}.nc", tech_costs=COSTS, - output: 'networks/elec_s{simpl}_{clusters}_ec.nc' - log: "logs/add_extra_components/elec_s{simpl}_{clusters}.log" - benchmark: "benchmarks/add_extra_components/elec_s{simpl}_{clusters}_ec" + output: + "networks/elec_s{simpl}_{clusters}_ec.nc", + log: + "logs/add_extra_components/elec_s{simpl}_{clusters}.log", + benchmark: + "benchmarks/add_extra_components/elec_s{simpl}_{clusters}_ec" threads: 1 - resources: mem=3000 - script: "scripts/add_extra_components.py" + resources: + mem=3000, + script: + "scripts/add_extra_components.py" rule prepare_network: - input: 'networks/elec_s{simpl}_{clusters}_ec.nc', tech_costs=COSTS - output: 'networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc' - log: "logs/prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.log" - benchmark: "benchmarks/prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" + input: + "networks/elec_s{simpl}_{clusters}_ec.nc", + tech_costs=COSTS, + output: + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + log: + "logs/prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.log", + benchmark: + "benchmarks/prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" threads: 1 - resources: mem=4000 - script: "scripts/prepare_network.py" + resources: + mem=4000, + script: + "scripts/prepare_network.py" def memory(w): - factor = 3. 
- for o in w.opts.split('-'): - m = re.match(r'^(\d+)h$', o, re.IGNORECASE) + factor = 3.0 + for o in w.opts.split("-"): + m = re.match(r"^(\d+)h$", o, re.IGNORECASE) if m is not None: factor /= int(m.group(1)) break - for o in w.opts.split('-'): - m = re.match(r'^(\d+)seg$', o, re.IGNORECASE) + for o in w.opts.split("-"): + m = re.match(r"^(\d+)seg$", o, re.IGNORECASE) if m is not None: factor *= int(m.group(1)) / 8760 break - if w.clusters.endswith('m'): + if w.clusters.endswith("m"): return int(factor * (18000 + 180 * int(w.clusters[:-1]))) else: return int(factor * (10000 + 195 * int(w.clusters))) rule solve_network: - input: "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc" - output: "results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc" + input: + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + output: + "results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", log: - solver=normpath("logs/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log"), + solver=normpath( + "logs/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log" + ), python="logs/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log", - memory="logs/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_memory.log" - benchmark: "benchmarks/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" + memory="logs/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_memory.log", + benchmark: + "benchmarks/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" threads: 4 - resources: mem=memory - shadow: "shallow" - script: "scripts/solve_network.py" + resources: + mem=memory, + shadow: + "shallow" + script: + "scripts/solve_network.py" rule solve_operations_network: input: unprepared="networks/elec_s{simpl}_{clusters}_ec.nc", - optimized="results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc" - output: "results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op.nc" + optimized="results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + output: + "results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op.nc", log: - solver=normpath("logs/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_solver.log"), + solver=normpath( + "logs/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_solver.log" + ), python="logs/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log", - memory="logs/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_memory.log" - benchmark: "benchmarks/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" + memory="logs/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_memory.log", + benchmark: + "benchmarks/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" threads: 4 - resources: mem=(lambda w: 5000 + 372 * int(w.clusters)) - shadow: "shallow" - script: "scripts/solve_operations_network.py" + resources: + mem=(lambda w: 5000 + 372 * int(w.clusters)), + shadow: + "shallow" + script: + "scripts/solve_operations_network.py" rule plot_network: input: network="results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", - tech_costs=COSTS + tech_costs=COSTS, output: only_map="results/plots/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}.{ext}", - ext="results/plots/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_ext.{ext}" - log: "logs/plot_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_{ext}.log" - script: "scripts/plot_network.py" + 
ext="results/plots/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_ext.{ext}", + log: + "logs/plot_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_{ext}.log", + script: + "scripts/plot_network.py" def input_make_summary(w): @@ -366,36 +511,57 @@ def input_make_summary(w): ll = [l for l in ll if l[0] == w.ll[0]] else: ll = w.ll - return ([COSTS] + - expand("results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", - ll=ll, - **{k: config["scenario"][k] if getattr(w, k) == "all" else getattr(w, k) - for k in ["simpl", "clusters", "opts"]})) + return [COSTS] + expand( + "results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + ll=ll, + **{ + k: config["scenario"][k] if getattr(w, k) == "all" else getattr(w, k) + for k in ["simpl", "clusters", "opts"] + } + ) rule make_summary: - input: input_make_summary - output: directory("results/summaries/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}") - log: "logs/make_summary/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.log", - script: "scripts/make_summary.py" + input: + input_make_summary, + output: + directory( + "results/summaries/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}" + ), + log: + "logs/make_summary/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.log", + script: + "scripts/make_summary.py" rule plot_summary: - input: "results/summaries/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}" - output: "results/plots/summary_{summary}_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.{ext}" - log: "logs/plot_summary/{summary}_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}_{ext}.log" - script: "scripts/plot_summary.py" + input: + "results/summaries/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}", + output: + "results/plots/summary_{summary}_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.{ext}", + log: + "logs/plot_summary/{summary}_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}_{ext}.log", + script: + "scripts/plot_summary.py" def input_plot_p_nom_max(w): - return [("networks/elec_s{simpl}{maybe_cluster}.nc" - .format(maybe_cluster=('' if c == 'full' else ('_' + c)), **w)) - for c in w.clusts.split(",")] + return [ + ( + "networks/elec_s{simpl}{maybe_cluster}.nc".format( + maybe_cluster=("" if c == "full" else ("_" + c)), **w + ) + ) + for c in w.clusts.split(",") + ] rule plot_p_nom_max: - input: input_plot_p_nom_max - output: "results/plots/elec_s{simpl}_cum_p_nom_max_{clusts}_{techs}_{country}.{ext}" - log: "logs/plot_p_nom_max/elec_s{simpl}_{clusts}_{techs}_{country}_{ext}.log" - script: "scripts/plot_p_nom_max.py" - + input: + input_plot_p_nom_max, + output: + "results/plots/elec_s{simpl}_cum_p_nom_max_{clusts}_{techs}_{country}.{ext}", + log: + "logs/plot_p_nom_max/elec_s{simpl}_{clusts}_{techs}_{country}_{ext}.log", + script: + "scripts/plot_p_nom_max.py" diff --git a/config.default.yaml b/config.default.yaml index f70e7c2c3..ab3f1b614 100755 --- a/config.default.yaml +++ b/config.default.yaml @@ -67,27 +67,27 @@ atlite: nprocesses: 4 cutouts: # use 'base' to determine geographical bounds and time span from config - # base: - # module: era5 + # base: + # module: era5 europe-2013-era5: - module: era5 # in priority order + module: era5 # in priority order x: [-12., 35.] y: [33., 72] dx: 0.3 dy: 0.3 time: ['2013', '2013'] europe-2013-sarah: - module: [sarah, era5] # in priority order + module: [sarah, era5] # in priority order x: [-12., 45.] 
y: [33., 65] dx: 0.2 dy: 0.2 time: ['2013', '2013'] sarah_interpolate: false - sarah_dir: + sarah_dir: features: [influx, temperature] - + renewable: onwind: cutout: europe-2013-era5 @@ -99,8 +99,7 @@ renewable: corine: # Scholz, Y. (2012). Renewable energy based electricity supply at low costs: # development of the REMix model and application for Europe. ( p.42 / p.28) - grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 31, 32] + grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32] distance: 1000 distance_grid_codes: [1, 2, 3, 4, 5, 6] natura: true @@ -154,8 +153,7 @@ renewable: # sector: The economic potential of photovoltaics and concentrating solar # power." Applied Energy 135 (2014): 704-720. correction_factor: 0.854337 - corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, - 14, 15, 16, 17, 18, 19, 20, 26, 31, 32] + corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 26, 31, 32] natura: true potential: simple # or conservative clip_p_max_pu: 1.e-2 @@ -188,9 +186,9 @@ transformers: type: '' load: - power_statistics: True # only for files from <2019; set false in order to get ENTSOE transparency data + power_statistics: true # only for files from <2019; set false in order to get ENTSOE transparency data interpolate_limit: 3 # data gaps up until this size are interpolated linearly - time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from + time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from manual_adjustments: true # false scaling_factor: 1.0 @@ -243,7 +241,7 @@ solving: plotting: map: figsize: [7, 7] - boundaries: [-10.2, 29, 35, 72] + boundaries: [-10.2, 29, 35, 72] p_nom: bus_size_factor: 5.e+4 linewidth_factor: 3.e+3 @@ -262,50 +260,50 @@ plotting: AC_carriers: ["AC line", "AC transformer"] link_carriers: ["DC line", "Converter AC-DC"] tech_colors: - "onwind" : "#235ebc" - "onshore wind" : "#235ebc" - 'offwind' : "#6895dd" - 'offwind-ac' : "#6895dd" - 'offshore wind' : "#6895dd" - 'offshore wind ac' : "#6895dd" - 'offwind-dc' : "#74c6f2" - 'offshore wind dc' : "#74c6f2" - "hydro" : "#08ad97" - "hydro+PHS" : "#08ad97" - "PHS" : "#08ad97" - "hydro reservoir" : "#08ad97" - 'hydroelectricity' : '#08ad97' - "ror" : "#4adbc8" - "run of river" : "#4adbc8" - 'solar' : "#f9d002" - 'solar PV' : "#f9d002" - 'solar thermal' : '#ffef60' - 'biomass' : '#0c6013' - 'solid biomass' : '#06540d' - 'biogas' : '#23932d' - 'waste' : '#68896b' - 'geothermal' : '#ba91b1' - "OCGT" : "#d35050" - "gas" : "#d35050" - "natural gas" : "#d35050" - "CCGT" : "#b20101" - "nuclear" : "#ff9000" - "coal" : "#707070" - "lignite" : "#9e5a01" - "oil" : "#262626" - "H2" : "#ea048a" - "hydrogen storage" : "#ea048a" - "battery" : "#b8ea04" - "Electric load" : "#f9d002" - "electricity" : "#f9d002" - "lines" : "#70af1d" - "transmission lines" : "#70af1d" - "AC-AC" : "#70af1d" - "AC line" : "#70af1d" - "links" : "#8a1caf" - "HVDC links" : "#8a1caf" - "DC-DC" : "#8a1caf" - "DC link" : "#8a1caf" + "onwind": "#235ebc" + "onshore wind": "#235ebc" + 'offwind': "#6895dd" + 'offwind-ac': "#6895dd" + 'offshore wind': "#6895dd" + 'offshore wind ac': "#6895dd" + 'offwind-dc': "#74c6f2" + 'offshore wind dc': "#74c6f2" + "hydro": "#08ad97" + "hydro+PHS": "#08ad97" + "PHS": "#08ad97" + "hydro reservoir": "#08ad97" + 'hydroelectricity': '#08ad97' + "ror": "#4adbc8" + "run of river": "#4adbc8" + 'solar': "#f9d002" + 'solar PV': "#f9d002" + 'solar thermal': '#ffef60' + 
'biomass': '#0c6013' + 'solid biomass': '#06540d' + 'biogas': '#23932d' + 'waste': '#68896b' + 'geothermal': '#ba91b1' + "OCGT": "#d35050" + "gas": "#d35050" + "natural gas": "#d35050" + "CCGT": "#b20101" + "nuclear": "#ff9000" + "coal": "#707070" + "lignite": "#9e5a01" + "oil": "#262626" + "H2": "#ea048a" + "hydrogen storage": "#ea048a" + "battery": "#b8ea04" + "Electric load": "#f9d002" + "electricity": "#f9d002" + "lines": "#70af1d" + "transmission lines": "#70af1d" + "AC-AC": "#70af1d" + "AC line": "#70af1d" + "links": "#8a1caf" + "HVDC links": "#8a1caf" + "DC-DC": "#8a1caf" + "DC link": "#8a1caf" nice_names: OCGT: "Open-Cycle Gas" CCGT: "Combined-Cycle Gas" diff --git a/config.tutorial.yaml b/config.tutorial.yaml index 26ead2424..5ef9e8eb4 100755 --- a/config.tutorial.yaml +++ b/config.tutorial.yaml @@ -75,8 +75,7 @@ renewable: corine: # Scholz, Y. (2012). Renewable energy based electricity supply at low costs: # development of the REMix model and application for Europe. ( p.42 / p.28) - grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 31, 32] + grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32] distance: 1000 distance_grid_codes: [1, 2, 3, 4, 5, 6] natura: true @@ -122,8 +121,7 @@ renewable: # sector: The economic potential of photovoltaics and concentrating solar # power." Applied Energy 135 (2014): 704-720. correction_factor: 0.854337 - corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, - 14, 15, 16, 17, 18, 19, 20, 26, 31, 32] + corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 26, 31, 32] natura: true potential: simple # or conservative clip_p_max_pu: 1.e-2 @@ -150,9 +148,9 @@ transformers: type: '' load: - power_statistics: True # only for files from <2019; set false in order to get ENTSOE transparency data + power_statistics: true # only for files from <2019; set false in order to get ENTSOE transparency data interpolate_limit: 3 # data gaps up until this size are interpolated linearly - time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from + time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from manual_adjustments: true # false scaling_factor: 1.0 @@ -185,7 +183,7 @@ solving: plotting: map: figsize: [7, 7] - boundaries: [-10.2, 29, 35, 72] + boundaries: [-10.2, 29, 35, 72] p_nom: bus_size_factor: 5.e+4 linewidth_factor: 3.e+3 @@ -204,50 +202,50 @@ plotting: AC_carriers: ["AC line", "AC transformer"] link_carriers: ["DC line", "Converter AC-DC"] tech_colors: - "onwind" : "#235ebc" - "onshore wind" : "#235ebc" - 'offwind' : "#6895dd" - 'offwind-ac' : "#6895dd" - 'offshore wind' : "#6895dd" - 'offshore wind ac' : "#6895dd" - 'offwind-dc' : "#74c6f2" - 'offshore wind dc' : "#74c6f2" - "hydro" : "#08ad97" - "hydro+PHS" : "#08ad97" - "PHS" : "#08ad97" - "hydro reservoir" : "#08ad97" - 'hydroelectricity' : '#08ad97' - "ror" : "#4adbc8" - "run of river" : "#4adbc8" - 'solar' : "#f9d002" - 'solar PV' : "#f9d002" - 'solar thermal' : '#ffef60' - 'biomass' : '#0c6013' - 'solid biomass' : '#06540d' - 'biogas' : '#23932d' - 'waste' : '#68896b' - 'geothermal' : '#ba91b1' - "OCGT" : "#d35050" - "gas" : "#d35050" - "natural gas" : "#d35050" - "CCGT" : "#b20101" - "nuclear" : "#ff9000" - "coal" : "#707070" - "lignite" : "#9e5a01" - "oil" : "#262626" - "H2" : "#ea048a" - "hydrogen storage" : "#ea048a" - "battery" : "#b8ea04" - "Electric load" : "#f9d002" - "electricity" : "#f9d002" - "lines" : "#70af1d" - "transmission 
lines" : "#70af1d" - "AC-AC" : "#70af1d" - "AC line" : "#70af1d" - "links" : "#8a1caf" - "HVDC links" : "#8a1caf" - "DC-DC" : "#8a1caf" - "DC link" : "#8a1caf" + "onwind": "#235ebc" + "onshore wind": "#235ebc" + 'offwind': "#6895dd" + 'offwind-ac': "#6895dd" + 'offshore wind': "#6895dd" + 'offshore wind ac': "#6895dd" + 'offwind-dc': "#74c6f2" + 'offshore wind dc': "#74c6f2" + "hydro": "#08ad97" + "hydro+PHS": "#08ad97" + "PHS": "#08ad97" + "hydro reservoir": "#08ad97" + 'hydroelectricity': '#08ad97' + "ror": "#4adbc8" + "run of river": "#4adbc8" + 'solar': "#f9d002" + 'solar PV': "#f9d002" + 'solar thermal': '#ffef60' + 'biomass': '#0c6013' + 'solid biomass': '#06540d' + 'biogas': '#23932d' + 'waste': '#68896b' + 'geothermal': '#ba91b1' + "OCGT": "#d35050" + "gas": "#d35050" + "natural gas": "#d35050" + "CCGT": "#b20101" + "nuclear": "#ff9000" + "coal": "#707070" + "lignite": "#9e5a01" + "oil": "#262626" + "H2": "#ea048a" + "hydrogen storage": "#ea048a" + "battery": "#b8ea04" + "Electric load": "#f9d002" + "electricity": "#f9d002" + "lines": "#70af1d" + "transmission lines": "#70af1d" + "AC-AC": "#70af1d" + "AC line": "#70af1d" + "links": "#8a1caf" + "HVDC links": "#8a1caf" + "DC-DC": "#8a1caf" + "DC link": "#8a1caf" nice_names: OCGT: "Open-Cycle Gas" CCGT: "Combined-Cycle Gas" diff --git a/data/parameter_corrections.yaml b/data/parameter_corrections.yaml index 67b73ec19..be8d2d42d 100644 --- a/data/parameter_corrections.yaml +++ b/data/parameter_corrections.yaml @@ -34,12 +34,12 @@ Link: "12998": "1333" # combine link 12998 + 12997 in 12998 "5627": '2309' # combine link 5627 + 5628 in 5627 "8068": "5819" # fix GB location of Anglo-Scottish interconnector - length: + length: index: "12998": 409.0 "5627": 26.39 - bus0: - index: + bus0: + index: "14552": "5819" # fix GB location of GB-IE interconnector "5628": "7276" # bus0 == bus1 to remove link in remove_unconnected_components "12997": "7276" # bus0 == bus1 to remove link in remove_unconnected_components diff --git a/doc/_static/theme_overrides.css b/doc/_static/theme_overrides.css index a4c9818d7..827dbbade 100644 --- a/doc/_static/theme_overrides.css +++ b/doc/_static/theme_overrides.css @@ -71,4 +71,4 @@ .wy-nav-content { max-width: 910px !important; } -} \ No newline at end of file +} diff --git a/doc/conf.py b/doc/conf.py index 01dd6bc8d..c2004235e 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: 20017-2020 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -23,12 +24,12 @@ # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('../scripts')) +sys.path.insert(0, os.path.abspath("../scripts")) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' +# needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. 
They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom @@ -36,47 +37,47 @@ extensions = [ #'sphinx.ext.autodoc', #'sphinx.ext.autosummary', - 'sphinx.ext.intersphinx', - 'sphinx.ext.todo', - 'sphinx.ext.mathjax', - 'sphinx.ext.napoleon', - 'sphinx.ext.graphviz', + "sphinx.ext.intersphinx", + "sphinx.ext.todo", + "sphinx.ext.mathjax", + "sphinx.ext.napoleon", + "sphinx.ext.graphviz", #'sphinx.ext.pngmath', #'sphinxcontrib.tikz', #'rinoh.frontend.sphinx', - 'sphinx.ext.imgconverter', # for SVG conversion + "sphinx.ext.imgconverter", # for SVG conversion ] -autodoc_default_flags = ['members'] +autodoc_default_flags = ["members"] autosummary_generate = True # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The encoding of source files. -#source_encoding = 'utf-8-sig' +# source_encoding = 'utf-8-sig' # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = u'PyPSA-Eur' -copyright = u'2017-2020 Jonas Hoersch (KIT, FIAS), Fabian Hofmann (FIAS), David Schlachtberger (FIAS), Tom Brown (KIT, FIAS); 2019-2020 Fabian Neumann (KIT)' -author = u'Jonas Hoersch (KIT, FIAS), Fabian Hofmann (FIAS), David Schlachtberger (FIAS), Tom Brown (KIT, FIAS), Fabian Neumann (KIT)' +project = u"PyPSA-Eur" +copyright = u"2017-2020 Jonas Hoersch (KIT, FIAS), Fabian Hofmann (FIAS), David Schlachtberger (FIAS), Tom Brown (KIT, FIAS); 2019-2020 Fabian Neumann (KIT)" +author = u"Jonas Hoersch (KIT, FIAS), Fabian Hofmann (FIAS), David Schlachtberger (FIAS), Tom Brown (KIT, FIAS), Fabian Neumann (KIT)" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = u'0.3' +version = u"0.3" # The full version, including alpha/beta/rc tags. -release = u'0.4.0' +release = u"0.4.0" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -87,37 +88,37 @@ # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['_build'] +exclude_patterns = ["_build"] # The reST default role (used for this markup: `text`) to use for all # documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). -#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. 
-#modindex_common_prefix = [] +# modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. -#keep_warnings = False +# keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True @@ -127,35 +128,35 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = 'sphinx_rtd_theme' +html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { - 'display_version': True, - 'sticky_navigation': True, + "display_version": True, + "sticky_navigation": True, } # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] +# html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +# html_favicon = None # These folders are copied to the documentation's HTML output html_static_path = ["_static"] @@ -167,130 +168,127 @@ # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. -#html_extra_path = [] +# html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. -#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. -#html_domain_indices = True +# html_domain_indices = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True +# html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True +# html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). 
-#html_file_suffix = None +# html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -#html_search_language = 'en' +# html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value -#html_search_options = {'type': 'default'} +# html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. -#html_search_scorer = 'scorer.js' +# html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. -htmlhelp_basename = 'PyPSAEurdoc' +htmlhelp_basename = "PyPSAEurdoc" # -- Options for LaTeX output --------------------------------------------- latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', - -# Latex figure (float) alignment -#'figure_align': 'htbp', + # The paper size ('letterpaper' or 'a4paper'). + #'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + #'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + #'preamble': '', + # Latex figure (float) alignment + #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'PyPSA-Eur.tex', u'PyPSA-Eur Documentation', - u'author', 'manual'), + (master_doc, "PyPSA-Eur.tex", u"PyPSA-Eur Documentation", u"author", "manual"), ] -#Added for rinoh http://www.mos6581.org/rinohtype/quickstart.html -rinoh_documents = [(master_doc, # top-level file (index.rst) - 'PyPSA-Eur', # output (target.pdf) - 'PyPSA-Eur Documentation', # document title - 'author')] # document author +# Added for rinoh http://www.mos6581.org/rinohtype/quickstart.html +rinoh_documents = [ + ( + master_doc, # top-level file (index.rst) + "PyPSA-Eur", # output (target.pdf) + "PyPSA-Eur Documentation", # document title + "author", + ) +] # document author # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # If true, show page references after internal links. -#latex_show_pagerefs = False +# latex_show_pagerefs = False # If true, show URL addresses after external links. -#latex_show_urls = False +# latex_show_urls = False # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. -#latex_domain_indices = True +# latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'pypsa-eur', u'PyPSA-Eur Documentation', - [author], 1) -] +man_pages = [(master_doc, "pypsa-eur", u"PyPSA-Eur Documentation", [author], 1)] # If true, show URL addresses after external links. 
-#man_show_urls = False +# man_show_urls = False # -- Options for Texinfo output ------------------------------------------- @@ -299,23 +297,29 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'PyPSA-Eur', u'PyPSA-Eur Documentation', - author, 'PyPSA-Eur', 'One line description of project.', - 'Miscellaneous'), + ( + master_doc, + "PyPSA-Eur", + u"PyPSA-Eur Documentation", + author, + "PyPSA-Eur", + "One line description of project.", + "Miscellaneous", + ), ] # Documents to append as an appendix to all manuals. -#texinfo_appendices = [] +# texinfo_appendices = [] # If false, no module index is generated. -#texinfo_domain_indices = True +# texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' +# texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. -#texinfo_no_detailmenu = False +# texinfo_no_detailmenu = False # Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {'https://docs.python.org/': None} +intersphinx_mapping = {"https://docs.python.org/": None} diff --git a/doc/requirements.txt b/doc/requirements.txt index 2b4617183..9f27cef18 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -18,4 +18,4 @@ pyyaml seaborn memory_profiler tables -descartes \ No newline at end of file +descartes diff --git a/envs/environment.fixed.yaml b/envs/environment.fixed.yaml index 3fe3d51af..a85830fb8 100644 --- a/envs/environment.fixed.yaml +++ b/envs/environment.fixed.yaml @@ -4,308 +4,308 @@ name: pypsa-eur channels: - - bioconda - - conda-forge - - defaults +- bioconda +- conda-forge +- defaults dependencies: - - _libgcc_mutex=0.1 - - _openmp_mutex=4.5 - - affine=2.3.0 - - alsa-lib=1.2.3 - - amply=0.1.4 - - appdirs=1.4.4 - - atlite=0.2.5 - - attrs=21.2.0 - - backcall=0.2.0 - - backports=1.0 - - backports.functools_lru_cache=1.6.4 - - beautifulsoup4=4.10.0 - - blosc=1.21.0 - - bokeh=2.3.3 - - boost-cpp=1.74.0 - - bottleneck=1.3.2 - - brotlipy=0.7.0 - - bzip2=1.0.8 - - c-ares=1.17.2 - - ca-certificates=2021.5.30 - - cairo=1.16.0 - - cartopy=0.19.0.post1 - - cdsapi=0.5.1 - - certifi=2021.5.30 - - cffi=1.14.6 - - cfitsio=3.470 - - cftime=1.5.0 - - chardet=4.0.0 - - charset-normalizer=2.0.0 - - click=7.1.2 - - click-plugins=1.1.1 - - cligj=0.7.2 - - cloudpickle=2.0.0 - - coincbc=2.10.5 - - colorama=0.4.4 - - conda=4.10.3 - - conda-package-handling=1.7.3 - - configargparse=1.5.2 - - connection_pool=0.0.3 - - country_converter=0.7.3 - - cryptography=3.4.7 - - curl=7.79.0 - - cycler=0.10.0 - - cytoolz=0.11.0 - - dask=2021.3.1 - - dask-core=2021.3.1 - - datrie=0.8.2 - - dbus=1.13.6 - - decorator=4.4.2 - - deprecation=2.1.0 - - descartes=1.1.0 - - distributed=2021.4.1 - - distro=1.5.0 - - docutils=0.17.1 - - entsoe-py=0.3.7 - - et_xmlfile=1.0.1 - - expat=2.4.1 - - filelock=3.0.12 - - fiona=1.8.18 - - fontconfig=2.13.1 - - freetype=2.10.4 - - freexl=1.0.6 - - fsspec=2021.8.1 - - gdal=3.2.1 - - geographiclib=1.52 - - geopandas=0.9.0 - - geopandas-base=0.9.0 - - geopy=2.2.0 - - geos=3.9.1 - - geotiff=1.6.0 - - gettext=0.19.8.1 - - giflib=5.2.1 - - gitdb=4.0.7 - - gitpython=3.1.23 - - glib=2.68.4 - - glib-tools=2.68.4 - - graphite2=1.3.13 - - gst-plugins-base=1.18.5 - - gstreamer=1.18.5 - - harfbuzz=2.9.1 - - hdf4=4.2.15 - - hdf5=1.10.6 - - heapdict=1.0.1 - - icu=68.1 - - idna=3.1 - - importlib-metadata=4.8.1 - - iniconfig=1.1.1 - - ipython=7.27.0 - - 
ipython_genutils=0.2.0 - - jdcal=1.4.1 - - jedi=0.18.0 - - jinja2=3.0.1 - - joblib=1.0.1 - - jpeg=9d - - json-c=0.15 - - jsonschema=3.2.0 - - jupyter_core=4.8.1 - - kealib=1.4.14 - - kiwisolver=1.3.2 - - krb5=1.19.2 - - lcms2=2.12 - - ld_impl_linux-64=2.36.1 - - libarchive=3.5.1 - - libblas=3.9.0 - - libcblas=3.9.0 - - libclang=11.1.0 - - libcurl=7.79.0 - - libdap4=3.20.6 - - libedit=3.1.20191231 - - libev=4.33 - - libevent=2.1.10 - - libffi=3.4.2 - - libgcc-ng=11.2.0 - - libgdal=3.2.1 - - libgfortran-ng=11.2.0 - - libgfortran5=11.2.0 - - libglib=2.68.4 - - libgomp=11.2.0 - - libiconv=1.16 - - libkml=1.3.0 - - liblapack=3.9.0 - - libllvm11=11.1.0 - - libnetcdf=4.7.4 - - libnghttp2=1.43.0 - - libogg=1.3.4 - - libopenblas=0.3.17 - - libopus=1.3.1 - - libpng=1.6.37 - - libpq=13.3 - - librttopo=1.1.0 - - libsolv=0.7.19 - - libspatialindex=1.9.3 - - libspatialite=5.0.1 - - libssh2=1.10.0 - - libstdcxx-ng=11.2.0 - - libtiff=4.2.0 - - libuuid=2.32.1 - - libvorbis=1.3.7 - - libwebp-base=1.2.1 - - libxcb=1.13 - - libxkbcommon=1.0.3 - - libxml2=2.9.12 - - libxslt=1.1.33 - - locket=0.2.0 - - lxml=4.6.3 - - lz4-c=1.9.3 - - lzo=2.10 - - mamba=0.15.3 - - mapclassify=2.4.3 - - markupsafe=2.0.1 - - matplotlib=3.4.3 - - matplotlib-base=3.4.3 - - matplotlib-inline=0.1.3 - - memory_profiler=0.58.0 - - mock=4.0.3 - - more-itertools=8.10.0 - - msgpack-python=1.0.2 - - munch=2.5.0 - - mysql-common=8.0.25 - - mysql-libs=8.0.25 - - nbformat=5.1.3 - - ncurses=6.2 - - netcdf4=1.5.6 - - networkx=2.6.3 - - nspr=4.30 - - nss=3.69 - - numexpr=2.7.3 - - numpy=1.21.2 - - olefile=0.46 - - openjdk=11.0.9.1 - - openjpeg=2.4.0 - - openpyxl=3.0.8 - - openssl=1.1.1l - - packaging=21.0 - - pandas=1.2.5 - - parso=0.8.2 - - partd=1.2.0 - - patsy=0.5.1 - - pcre=8.45 - - pexpect=4.8.0 - - pickleshare=0.7.5 - - pillow=8.2.0 - - pip=21.2.4 - - pixman=0.40.0 - - pluggy=1.0.0 - - ply=3.11 - - poppler=0.89.0 - - poppler-data=0.4.11 - - postgresql=13.3 - - powerplantmatching=0.4.8 - - progressbar2=3.53.1 - - proj=7.2.0 - - prompt-toolkit=3.0.20 - - psutil=5.8.0 - - pthread-stubs=0.4 - - ptyprocess=0.7.0 - - pulp=2.5.0 - - py=1.10.0 - - pycosat=0.6.3 - - pycountry=20.7.3 - - pycparser=2.20 - - pygments=2.10.0 - - pyomo=6.1.2 - - pyopenssl=20.0.1 - - pyparsing=2.4.7 - - pyproj=3.1.0 - - pypsa=0.18.0 - - pyqt=5.12.3 - - pyqt-impl=5.12.3 - - pyqt5-sip=4.19.18 - - pyqtchart=5.12 - - pyqtwebengine=5.12.1 - - pyrsistent=0.17.3 - - pyshp=2.1.3 - - pysocks=1.7.1 - - pytables=3.6.1 - - pytest=6.2.5 - - python=3.9.7 - - python-dateutil=2.8.2 - - python-utils=2.5.6 - - python_abi=3.9 - - pytz=2021.1 - - pyyaml=5.4.1 - - qt=5.12.9 - - rasterio=1.2.6 - - ratelimiter=1.2.0 - - readline=8.1 - - reproc=14.2.3 - - reproc-cpp=14.2.3 - - requests=2.26.0 - - rtree=0.9.7 - - ruamel_yaml=0.15.80 - - scikit-learn=0.24.2 - - scipy=1.7.1 - - seaborn=0.11.2 - - seaborn-base=0.11.2 - - setuptools=58.0.4 - - setuptools-scm=6.3.2 - - setuptools_scm=6.3.2 - - shapely=1.7.1 - - six=1.16.0 - - smart_open=5.2.1 - - smmap=3.0.5 - - snakemake-minimal=6.8.0 - - snuggs=1.4.7 - - sortedcontainers=2.4.0 - - soupsieve=2.0.1 - - sqlite=3.36.0 - - statsmodels=0.12.2 - - stopit=1.1.2 - - tabula-py=2.2.0 - - tabulate=0.8.9 - - tblib=1.7.0 - - threadpoolctl=2.2.0 - - tiledb=2.2.9 - - tk=8.6.11 - - toml=0.10.2 - - tomli=1.2.1 - - toolz=0.11.1 - - toposort=1.6 - - tornado=6.1 - - tqdm=4.62.3 - - traitlets=5.1.0 - - typing_extensions=3.10.0.2 - - tzcode=2021a - - tzdata=2021a - - urllib3=1.26.6 - - wcwidth=0.2.5 - - wheel=0.37.0 - - wrapt=1.12.1 - - xarray=0.19.0 - - xerces-c=3.2.3 - - 
xlrd=2.0.1 - - xorg-fixesproto=5.0 - - xorg-inputproto=2.3.2 - - xorg-kbproto=1.0.7 - - xorg-libice=1.0.10 - - xorg-libsm=1.2.3 - - xorg-libx11=1.7.2 - - xorg-libxau=1.0.9 - - xorg-libxdmcp=1.1.3 - - xorg-libxext=1.3.4 - - xorg-libxfixes=5.0.3 - - xorg-libxi=1.7.10 - - xorg-libxrender=0.9.10 - - xorg-libxtst=1.2.3 - - xorg-recordproto=1.14.2 - - xorg-renderproto=0.11.1 - - xorg-xextproto=7.3.0 - - xorg-xproto=7.0.31 - - xz=5.2.5 - - yaml=0.2.5 - - zict=2.0.0 - - zipp=3.5.0 - - zlib=1.2.11 - - zstd=1.4.9 - - pip: - - countrycode==0.2 - - sklearn==0.0 - - tsam==1.1.1 - - vresutils==0.3.1 +- _libgcc_mutex=0.1 +- _openmp_mutex=4.5 +- affine=2.3.0 +- alsa-lib=1.2.3 +- amply=0.1.4 +- appdirs=1.4.4 +- atlite=0.2.5 +- attrs=21.2.0 +- backcall=0.2.0 +- backports=1.0 +- backports.functools_lru_cache=1.6.4 +- beautifulsoup4=4.10.0 +- blosc=1.21.0 +- bokeh=2.3.3 +- boost-cpp=1.74.0 +- bottleneck=1.3.2 +- brotlipy=0.7.0 +- bzip2=1.0.8 +- c-ares=1.17.2 +- ca-certificates=2021.5.30 +- cairo=1.16.0 +- cartopy=0.19.0.post1 +- cdsapi=0.5.1 +- certifi=2021.5.30 +- cffi=1.14.6 +- cfitsio=3.470 +- cftime=1.5.0 +- chardet=4.0.0 +- charset-normalizer=2.0.0 +- click=7.1.2 +- click-plugins=1.1.1 +- cligj=0.7.2 +- cloudpickle=2.0.0 +- coincbc=2.10.5 +- colorama=0.4.4 +- conda=4.10.3 +- conda-package-handling=1.7.3 +- configargparse=1.5.2 +- connection_pool=0.0.3 +- country_converter=0.7.3 +- cryptography=3.4.7 +- curl=7.79.0 +- cycler=0.10.0 +- cytoolz=0.11.0 +- dask=2021.3.1 +- dask-core=2021.3.1 +- datrie=0.8.2 +- dbus=1.13.6 +- decorator=4.4.2 +- deprecation=2.1.0 +- descartes=1.1.0 +- distributed=2021.4.1 +- distro=1.5.0 +- docutils=0.17.1 +- entsoe-py=0.3.7 +- et_xmlfile=1.0.1 +- expat=2.4.1 +- filelock=3.0.12 +- fiona=1.8.18 +- fontconfig=2.13.1 +- freetype=2.10.4 +- freexl=1.0.6 +- fsspec=2021.8.1 +- gdal=3.2.1 +- geographiclib=1.52 +- geopandas=0.9.0 +- geopandas-base=0.9.0 +- geopy=2.2.0 +- geos=3.9.1 +- geotiff=1.6.0 +- gettext=0.19.8.1 +- giflib=5.2.1 +- gitdb=4.0.7 +- gitpython=3.1.23 +- glib=2.68.4 +- glib-tools=2.68.4 +- graphite2=1.3.13 +- gst-plugins-base=1.18.5 +- gstreamer=1.18.5 +- harfbuzz=2.9.1 +- hdf4=4.2.15 +- hdf5=1.10.6 +- heapdict=1.0.1 +- icu=68.1 +- idna=3.1 +- importlib-metadata=4.8.1 +- iniconfig=1.1.1 +- ipython=7.27.0 +- ipython_genutils=0.2.0 +- jdcal=1.4.1 +- jedi=0.18.0 +- jinja2=3.0.1 +- joblib=1.0.1 +- jpeg=9d +- json-c=0.15 +- jsonschema=3.2.0 +- jupyter_core=4.8.1 +- kealib=1.4.14 +- kiwisolver=1.3.2 +- krb5=1.19.2 +- lcms2=2.12 +- ld_impl_linux-64=2.36.1 +- libarchive=3.5.1 +- libblas=3.9.0 +- libcblas=3.9.0 +- libclang=11.1.0 +- libcurl=7.79.0 +- libdap4=3.20.6 +- libedit=3.1.20191231 +- libev=4.33 +- libevent=2.1.10 +- libffi=3.4.2 +- libgcc-ng=11.2.0 +- libgdal=3.2.1 +- libgfortran-ng=11.2.0 +- libgfortran5=11.2.0 +- libglib=2.68.4 +- libgomp=11.2.0 +- libiconv=1.16 +- libkml=1.3.0 +- liblapack=3.9.0 +- libllvm11=11.1.0 +- libnetcdf=4.7.4 +- libnghttp2=1.43.0 +- libogg=1.3.4 +- libopenblas=0.3.17 +- libopus=1.3.1 +- libpng=1.6.37 +- libpq=13.3 +- librttopo=1.1.0 +- libsolv=0.7.19 +- libspatialindex=1.9.3 +- libspatialite=5.0.1 +- libssh2=1.10.0 +- libstdcxx-ng=11.2.0 +- libtiff=4.2.0 +- libuuid=2.32.1 +- libvorbis=1.3.7 +- libwebp-base=1.2.1 +- libxcb=1.13 +- libxkbcommon=1.0.3 +- libxml2=2.9.12 +- libxslt=1.1.33 +- locket=0.2.0 +- lxml=4.6.3 +- lz4-c=1.9.3 +- lzo=2.10 +- mamba=0.15.3 +- mapclassify=2.4.3 +- markupsafe=2.0.1 +- matplotlib=3.4.3 +- matplotlib-base=3.4.3 +- matplotlib-inline=0.1.3 +- memory_profiler=0.58.0 +- mock=4.0.3 +- more-itertools=8.10.0 +- 
msgpack-python=1.0.2 +- munch=2.5.0 +- mysql-common=8.0.25 +- mysql-libs=8.0.25 +- nbformat=5.1.3 +- ncurses=6.2 +- netcdf4=1.5.6 +- networkx=2.6.3 +- nspr=4.30 +- nss=3.69 +- numexpr=2.7.3 +- numpy=1.21.2 +- olefile=0.46 +- openjdk=11.0.9.1 +- openjpeg=2.4.0 +- openpyxl=3.0.8 +- openssl=1.1.1l +- packaging=21.0 +- pandas=1.2.5 +- parso=0.8.2 +- partd=1.2.0 +- patsy=0.5.1 +- pcre=8.45 +- pexpect=4.8.0 +- pickleshare=0.7.5 +- pillow=8.2.0 +- pip=21.2.4 +- pixman=0.40.0 +- pluggy=1.0.0 +- ply=3.11 +- poppler=0.89.0 +- poppler-data=0.4.11 +- postgresql=13.3 +- powerplantmatching=0.4.8 +- progressbar2=3.53.1 +- proj=7.2.0 +- prompt-toolkit=3.0.20 +- psutil=5.8.0 +- pthread-stubs=0.4 +- ptyprocess=0.7.0 +- pulp=2.5.0 +- py=1.10.0 +- pycosat=0.6.3 +- pycountry=20.7.3 +- pycparser=2.20 +- pygments=2.10.0 +- pyomo=6.1.2 +- pyopenssl=20.0.1 +- pyparsing=2.4.7 +- pyproj=3.1.0 +- pypsa=0.18.0 +- pyqt=5.12.3 +- pyqt-impl=5.12.3 +- pyqt5-sip=4.19.18 +- pyqtchart=5.12 +- pyqtwebengine=5.12.1 +- pyrsistent=0.17.3 +- pyshp=2.1.3 +- pysocks=1.7.1 +- pytables=3.6.1 +- pytest=6.2.5 +- python=3.9.7 +- python-dateutil=2.8.2 +- python-utils=2.5.6 +- python_abi=3.9 +- pytz=2021.1 +- pyyaml=5.4.1 +- qt=5.12.9 +- rasterio=1.2.6 +- ratelimiter=1.2.0 +- readline=8.1 +- reproc=14.2.3 +- reproc-cpp=14.2.3 +- requests=2.26.0 +- rtree=0.9.7 +- ruamel_yaml=0.15.80 +- scikit-learn=0.24.2 +- scipy=1.7.1 +- seaborn=0.11.2 +- seaborn-base=0.11.2 +- setuptools=58.0.4 +- setuptools-scm=6.3.2 +- setuptools_scm=6.3.2 +- shapely=1.7.1 +- six=1.16.0 +- smart_open=5.2.1 +- smmap=3.0.5 +- snakemake-minimal=6.8.0 +- snuggs=1.4.7 +- sortedcontainers=2.4.0 +- soupsieve=2.0.1 +- sqlite=3.36.0 +- statsmodels=0.12.2 +- stopit=1.1.2 +- tabula-py=2.2.0 +- tabulate=0.8.9 +- tblib=1.7.0 +- threadpoolctl=2.2.0 +- tiledb=2.2.9 +- tk=8.6.11 +- toml=0.10.2 +- tomli=1.2.1 +- toolz=0.11.1 +- toposort=1.6 +- tornado=6.1 +- tqdm=4.62.3 +- traitlets=5.1.0 +- typing_extensions=3.10.0.2 +- tzcode=2021a +- tzdata=2021a +- urllib3=1.26.6 +- wcwidth=0.2.5 +- wheel=0.37.0 +- wrapt=1.12.1 +- xarray=0.19.0 +- xerces-c=3.2.3 +- xlrd=2.0.1 +- xorg-fixesproto=5.0 +- xorg-inputproto=2.3.2 +- xorg-kbproto=1.0.7 +- xorg-libice=1.0.10 +- xorg-libsm=1.2.3 +- xorg-libx11=1.7.2 +- xorg-libxau=1.0.9 +- xorg-libxdmcp=1.1.3 +- xorg-libxext=1.3.4 +- xorg-libxfixes=5.0.3 +- xorg-libxi=1.7.10 +- xorg-libxrender=0.9.10 +- xorg-libxtst=1.2.3 +- xorg-recordproto=1.14.2 +- xorg-renderproto=0.11.1 +- xorg-xextproto=7.3.0 +- xorg-xproto=7.0.31 +- xz=5.2.5 +- yaml=0.2.5 +- zict=2.0.0 +- zipp=3.5.0 +- zlib=1.2.11 +- zstd=1.4.9 +- pip: + - countrycode==0.2 + - sklearn==0.0 + - tsam==1.1.1 + - vresutils==0.3.1 diff --git a/envs/environment.yaml b/envs/environment.yaml index 29d743ac2..692def7c2 100644 --- a/envs/environment.yaml +++ b/envs/environment.yaml @@ -2,59 +2,59 @@ # # SPDX-License-Identifier: MIT -name: pypsa-eur +name: pypsa-eur-2 channels: - - conda-forge - - bioconda - - http://conda.anaconda.org/gurobi +- conda-forge +- bioconda +- http://conda.anaconda.org/gurobi dependencies: - - python>=3.8 - - pip - - mamba # esp for windows build +- python>=3.8 +- pip +- mamba # esp for windows build - - pypsa>=0.18 - - atlite>=0.2.5 - - dask<=2021.3.1 # until https://github.com/dask/dask/issues/7583 is solved +- pypsa>=0.18 +- atlite>=0.2.5 +- dask<=2021.3.1 # until https://github.com/dask/dask/issues/7583 is solved # Dependencies of the workflow itself - - xlrd - - openpyxl - - pycountry - - seaborn - - snakemake-minimal - - memory_profiler - - yaml - - pytables - - lxml - - 
powerplantmatching>=0.4.8 - - numpy - - pandas<1.3 - - geopandas - - xarray - - netcdf4 - - networkx - - scipy - - shapely - - progressbar2 - - pyomo - - matplotlib - - proj<8 +- xlrd +- openpyxl +- pycountry +- seaborn +- snakemake-minimal +- memory_profiler +- yaml +- pytables +- lxml +- powerplantmatching>=0.4.8 +- numpy +- pandas<1.3 +- geopandas +- xarray +- netcdf4 +- networkx +- scipy +- shapely +- progressbar2 +- pyomo +- matplotlib +- proj<8 # Keep in conda environment when calling ipython - - ipython +- ipython # GIS dependencies: - - cartopy - - descartes - - rasterio +- cartopy +- descartes +- rasterio # PyPSA-Eur-Sec Dependencies - - geopy - - tqdm - - pytz - - country_converter - - tabula-py +- geopy +- tqdm +- pytz +- country_converter +- tabula-py - - pip: - - vresutils==0.3.1 - - tsam>=1.1.0 +- pip: + - vresutils==0.3.1 + - tsam>=1.1.0 diff --git a/scripts/_helpers.py b/scripts/_helpers.py index f1e5e8877..053ca0a4b 100644 --- a/scripts/_helpers.py +++ b/scripts/_helpers.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -27,21 +28,26 @@ def configure_logging(snakemake, skip_handlers=False): import logging - kwargs = snakemake.config.get('logging', dict()) + kwargs = snakemake.config.get("logging", dict()) kwargs.setdefault("level", "INFO") if skip_handlers is False: - fallback_path = Path(__file__).parent.joinpath('..', 'logs', f"{snakemake.rule}.log") - logfile = snakemake.log.get('python', snakemake.log[0] if snakemake.log - else fallback_path) + fallback_path = Path(__file__).parent.joinpath( + "..", "logs", f"{snakemake.rule}.log" + ) + logfile = snakemake.log.get( + "python", snakemake.log[0] if snakemake.log else fallback_path + ) kwargs.update( - {'handlers': [ - # Prefer the 'python' log, otherwise take the first log for each - # Snakemake rule - logging.FileHandler(logfile), - logging.StreamHandler() + { + "handlers": [ + # Prefer the 'python' log, otherwise take the first log for each + # Snakemake rule + logging.FileHandler(logfile), + logging.StreamHandler(), ] - }) + } + ) logging.basicConfig(**kwargs) @@ -79,21 +85,28 @@ def load_network(import_name=None, custom_components=None): if custom_components is not None: override_components = pypsa.components.components.copy() - override_component_attrs = Dict({k : v.copy() for k,v in pypsa.components.component_attrs.items()}) + override_component_attrs = Dict( + {k: v.copy() for k, v in pypsa.components.component_attrs.items()} + ) for k, v in custom_components.items(): - override_components.loc[k] = v['component'] - override_component_attrs[k] = pd.DataFrame(columns = ["type","unit","default","description","status"]) - for attr, val in v['attributes'].items(): + override_components.loc[k] = v["component"] + override_component_attrs[k] = pd.DataFrame( + columns=["type", "unit", "default", "description", "status"] + ) + for attr, val in v["attributes"].items(): override_component_attrs[k].loc[attr] = val - return pypsa.Network(import_name=import_name, - override_components=override_components, - override_component_attrs=override_component_attrs) + return pypsa.Network( + import_name=import_name, + override_components=override_components, + override_component_attrs=override_component_attrs, + ) def pdbcast(v, h): - return pd.DataFrame(v.values.reshape((-1, 1)) * h.values, - index=v.index, columns=h.index) + return pd.DataFrame( + v.values.reshape((-1, 1)) * h.values, index=v.index, columns=h.index + ) def 
load_network_for_plots(fn, tech_costs, config, combine_hydro_ps=True): @@ -105,100 +118,137 @@ def load_network_for_plots(fn, tech_costs, config, combine_hydro_ps=True): n.loads["carrier"] = n.loads.bus.map(n.buses.carrier) + " load" n.stores["carrier"] = n.stores.bus.map(n.buses.carrier) - n.links["carrier"] = (n.links.bus0.map(n.buses.carrier) + "-" + n.links.bus1.map(n.buses.carrier)) + n.links["carrier"] = ( + n.links.bus0.map(n.buses.carrier) + "-" + n.links.bus1.map(n.buses.carrier) + ) n.lines["carrier"] = "AC line" n.transformers["carrier"] = "AC transformer" - n.lines['s_nom'] = n.lines['s_nom_min'] - n.links['p_nom'] = n.links['p_nom_min'] + n.lines["s_nom"] = n.lines["s_nom_min"] + n.links["p_nom"] = n.links["p_nom_min"] if combine_hydro_ps: - n.storage_units.loc[n.storage_units.carrier.isin({'PHS', 'hydro'}), 'carrier'] = 'hydro+PHS' + n.storage_units.loc[ + n.storage_units.carrier.isin({"PHS", "hydro"}), "carrier" + ] = "hydro+PHS" # if the carrier was not set on the heat storage units # bus_carrier = n.storage_units.bus.map(n.buses.carrier) # n.storage_units.loc[bus_carrier == "heat","carrier"] = "water tanks" - Nyears = n.snapshot_weightings.objective.sum() / 8760. - costs = load_costs(Nyears, tech_costs, config['costs'], config['electricity']) + Nyears = n.snapshot_weightings.objective.sum() / 8760.0 + costs = load_costs(Nyears, tech_costs, config["costs"], config["electricity"]) update_transmission_costs(n, costs) return n + def update_p_nom_max(n): # if extendable carriers (solar/onwind/...) have capacity >= 0, # e.g. existing assets from the OPSD project are included to the network, # the installed capacity might exceed the expansion limit. # Hence, we update the assumptions. - - n.generators.p_nom_max = n.generators[['p_nom_min', 'p_nom_max']].max(1) + + n.generators.p_nom_max = n.generators[["p_nom_min", "p_nom_max"]].max(1) + def aggregate_p_nom(n): - return pd.concat([ - n.generators.groupby("carrier").p_nom_opt.sum(), - n.storage_units.groupby("carrier").p_nom_opt.sum(), - n.links.groupby("carrier").p_nom_opt.sum(), - n.loads_t.p.groupby(n.loads.carrier,axis=1).sum().mean() - ]) + return pd.concat( + [ + n.generators.groupby("carrier").p_nom_opt.sum(), + n.storage_units.groupby("carrier").p_nom_opt.sum(), + n.links.groupby("carrier").p_nom_opt.sum(), + n.loads_t.p.groupby(n.loads.carrier, axis=1).sum().mean(), + ] + ) + def aggregate_p(n): - return pd.concat([ - n.generators_t.p.sum().groupby(n.generators.carrier).sum(), - n.storage_units_t.p.sum().groupby(n.storage_units.carrier).sum(), - n.stores_t.p.sum().groupby(n.stores.carrier).sum(), - -n.loads_t.p.sum().groupby(n.loads.carrier).sum() - ]) + return pd.concat( + [ + n.generators_t.p.sum().groupby(n.generators.carrier).sum(), + n.storage_units_t.p.sum().groupby(n.storage_units.carrier).sum(), + n.stores_t.p.sum().groupby(n.stores.carrier).sum(), + -n.loads_t.p.sum().groupby(n.loads.carrier).sum(), + ] + ) + def aggregate_e_nom(n): - return pd.concat([ - (n.storage_units["p_nom_opt"]*n.storage_units["max_hours"]).groupby(n.storage_units["carrier"]).sum(), - n.stores["e_nom_opt"].groupby(n.stores.carrier).sum() - ]) + return pd.concat( + [ + (n.storage_units["p_nom_opt"] * n.storage_units["max_hours"]) + .groupby(n.storage_units["carrier"]) + .sum(), + n.stores["e_nom_opt"].groupby(n.stores.carrier).sum(), + ] + ) + def aggregate_p_curtailed(n): - return pd.concat([ - ((n.generators_t.p_max_pu.sum().multiply(n.generators.p_nom_opt) - n.generators_t.p.sum()) - .groupby(n.generators.carrier).sum()), - 
((n.storage_units_t.inflow.sum() - n.storage_units_t.p.sum()) - .groupby(n.storage_units.carrier).sum()) - ]) + return pd.concat( + [ + ( + ( + n.generators_t.p_max_pu.sum().multiply(n.generators.p_nom_opt) + - n.generators_t.p.sum() + ) + .groupby(n.generators.carrier) + .sum() + ), + ( + (n.storage_units_t.inflow.sum() - n.storage_units_t.p.sum()) + .groupby(n.storage_units.carrier) + .sum() + ), + ] + ) + def aggregate_costs(n, flatten=False, opts=None, existing_only=False): - components = dict(Link=("p_nom", "p0"), - Generator=("p_nom", "p"), - StorageUnit=("p_nom", "p"), - Store=("e_nom", "p"), - Line=("s_nom", None), - Transformer=("s_nom", None)) + components = dict( + Link=("p_nom", "p0"), + Generator=("p_nom", "p"), + StorageUnit=("p_nom", "p"), + Store=("e_nom", "p"), + Line=("s_nom", None), + Transformer=("s_nom", None), + ) costs = {} for c, (p_nom, p_attr) in zip( - n.iterate_components(components.keys(), skip_empty=False), - components.values() + n.iterate_components(components.keys(), skip_empty=False), components.values() ): - if c.df.empty: continue - if not existing_only: p_nom += "_opt" - costs[(c.list_name, 'capital')] = (c.df[p_nom] * c.df.capital_cost).groupby(c.df.carrier).sum() + if c.df.empty: + continue + if not existing_only: + p_nom += "_opt" + costs[(c.list_name, "capital")] = ( + (c.df[p_nom] * c.df.capital_cost).groupby(c.df.carrier).sum() + ) if p_attr is not None: p = c.pnl[p_attr].sum() - if c.name == 'StorageUnit': + if c.name == "StorageUnit": p = p.loc[p > 0] - costs[(c.list_name, 'marginal')] = (p*c.df.marginal_cost).groupby(c.df.carrier).sum() + costs[(c.list_name, "marginal")] = ( + (p * c.df.marginal_cost).groupby(c.df.carrier).sum() + ) costs = pd.concat(costs) if flatten: assert opts is not None - conv_techs = opts['conv_techs'] + conv_techs = opts["conv_techs"] costs = costs.reset_index(level=0, drop=True) - costs = costs['capital'].add( - costs['marginal'].rename({t: t + ' marginal' for t in conv_techs}), - fill_value=0. 
+ costs = costs["capital"].add( + costs["marginal"].rename({t: t + " marginal" for t in conv_techs}), + fill_value=0.0, ) return costs + def progress_retrieve(url, file): import urllib from progressbar import ProgressBar @@ -206,7 +256,7 @@ def progress_retrieve(url, file): pbar = ProgressBar(0, 100) def dlProgress(count, blockSize, totalSize): - pbar.update( int(count * blockSize * 100 / totalSize) ) + pbar.update(int(count * blockSize * 100 / totalSize)) urllib.request.urlretrieve(url, file, reporthook=dlProgress) @@ -233,8 +283,9 @@ def mock_snakemake(rulename, **wildcards): from snakemake.script import Snakemake script_dir = Path(__file__).parent.resolve() - assert Path.cwd().resolve() == script_dir, \ - f'mock_snakemake has to be run from the repository scripts directory {script_dir}' + assert ( + Path.cwd().resolve() == script_dir + ), f"mock_snakemake has to be run from the repository scripts directory {script_dir}" os.chdir(script_dir.parent) for p in sm.SNAKEFILE_CHOICES: if os.path.exists(p): @@ -254,9 +305,18 @@ def make_accessable(*ios): io[i] = os.path.abspath(io[i]) make_accessable(job.input, job.output, job.log) - snakemake = Snakemake(job.input, job.output, job.params, job.wildcards, - job.threads, job.resources, job.log, - job.dag.workflow.config, job.rule.name, None,) + snakemake = Snakemake( + job.input, + job.output, + job.params, + job.wildcards, + job.threads, + job.resources, + job.log, + job.dag.workflow.config, + job.rule.name, + None, + ) # create log and output dir if not existent for path in list(snakemake.log) + list(snakemake.output): Path(path).parent.mkdir(parents=True, exist_ok=True) diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py index 08a32a26f..22accfffd 100755 --- a/scripts/add_electricity.py +++ b/scripts/add_electricity.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -103,83 +104,103 @@ logger = logging.getLogger(__name__) -def normed(s): return s/s.sum() +def normed(s): + return s / s.sum() def _add_missing_carriers_from_costs(n, costs, carriers): missing_carriers = pd.Index(carriers).difference(n.carriers.index) - if missing_carriers.empty: return + if missing_carriers.empty: + return - emissions_cols = costs.columns.to_series()\ - .loc[lambda s: s.str.endswith('_emissions')].values - suptechs = missing_carriers.str.split('-').str[0] - emissions = costs.loc[suptechs, emissions_cols].fillna(0.) 
+ emissions_cols = ( + costs.columns.to_series().loc[lambda s: s.str.endswith("_emissions")].values + ) + suptechs = missing_carriers.str.split("-").str[0] + emissions = costs.loc[suptechs, emissions_cols].fillna(0.0) emissions.index = missing_carriers - n.import_components_from_dataframe(emissions, 'Carrier') + n.import_components_from_dataframe(emissions, "Carrier") -def load_costs(Nyears=1., tech_costs=None, config=None, elec_config=None): +def load_costs(Nyears=1.0, tech_costs=None, config=None, elec_config=None): if tech_costs is None: tech_costs = snakemake.input.tech_costs if config is None: - config = snakemake.config['costs'] + config = snakemake.config["costs"] # set all asset costs and other parameters costs = pd.read_csv(tech_costs, index_col=list(range(3))).sort_index() # correct units to MW and EUR - costs.loc[costs.unit.str.contains("/kW"),"value"] *= 1e3 - costs.loc[costs.unit.str.contains("USD"),"value"] *= config['USD2013_to_EUR2013'] - - costs = (costs.loc[idx[:,config['year'],:], "value"] - .unstack(level=2).groupby("technology").sum(min_count=1)) - - costs = costs.fillna({"CO2 intensity" : 0, - "FOM" : 0, - "VOM" : 0, - "discount rate" : config['discountrate'], - "efficiency" : 1, - "fuel" : 0, - "investment" : 0, - "lifetime" : 25}) - - costs["capital_cost"] = ((annuity(costs["lifetime"], costs["discount rate"]) + - costs["FOM"]/100.) * - costs["investment"] * Nyears) - - costs.at['OCGT', 'fuel'] = costs.at['gas', 'fuel'] - costs.at['CCGT', 'fuel'] = costs.at['gas', 'fuel'] - - costs['marginal_cost'] = costs['VOM'] + costs['fuel'] / costs['efficiency'] + costs.loc[costs.unit.str.contains("/kW"), "value"] *= 1e3 + costs.loc[costs.unit.str.contains("USD"), "value"] *= config["USD2013_to_EUR2013"] + + costs = ( + costs.loc[idx[:, config["year"], :], "value"] + .unstack(level=2) + .groupby("technology") + .sum(min_count=1) + ) + + costs = costs.fillna( + { + "CO2 intensity": 0, + "FOM": 0, + "VOM": 0, + "discount rate": config["discountrate"], + "efficiency": 1, + "fuel": 0, + "investment": 0, + "lifetime": 25, + } + ) + + costs["capital_cost"] = ( + (annuity(costs["lifetime"], costs["discount rate"]) + costs["FOM"] / 100.0) + * costs["investment"] + * Nyears + ) + + costs.at["OCGT", "fuel"] = costs.at["gas", "fuel"] + costs.at["CCGT", "fuel"] = costs.at["gas", "fuel"] + + costs["marginal_cost"] = costs["VOM"] + costs["fuel"] / costs["efficiency"] costs = costs.rename(columns={"CO2 intensity": "co2_emissions"}) - costs.at['OCGT', 'co2_emissions'] = costs.at['gas', 'co2_emissions'] - costs.at['CCGT', 'co2_emissions'] = costs.at['gas', 'co2_emissions'] + costs.at["OCGT", "co2_emissions"] = costs.at["gas", "co2_emissions"] + costs.at["CCGT", "co2_emissions"] = costs.at["gas", "co2_emissions"] - costs.at['solar', 'capital_cost'] = 0.5*(costs.at['solar-rooftop', 'capital_cost'] + - costs.at['solar-utility', 'capital_cost']) + costs.at["solar", "capital_cost"] = 0.5 * ( + costs.at["solar-rooftop", "capital_cost"] + + costs.at["solar-utility", "capital_cost"] + ) - def costs_for_storage(store, link1, link2=None, max_hours=1.): - capital_cost = link1['capital_cost'] + max_hours * store['capital_cost'] + def costs_for_storage(store, link1, link2=None, max_hours=1.0): + capital_cost = link1["capital_cost"] + max_hours * store["capital_cost"] if link2 is not None: - capital_cost += link2['capital_cost'] - return pd.Series(dict(capital_cost=capital_cost, - marginal_cost=0., - co2_emissions=0.)) + capital_cost += link2["capital_cost"] + return pd.Series( + 
dict(capital_cost=capital_cost, marginal_cost=0.0, co2_emissions=0.0) + ) if elec_config is None: - elec_config = snakemake.config['electricity'] - max_hours = elec_config['max_hours'] - costs.loc["battery"] = \ - costs_for_storage(costs.loc["battery storage"], costs.loc["battery inverter"], - max_hours=max_hours['battery']) - costs.loc["H2"] = \ - costs_for_storage(costs.loc["hydrogen storage"], costs.loc["fuel cell"], - costs.loc["electrolysis"], max_hours=max_hours['H2']) - - for attr in ('marginal_cost', 'capital_cost'): + elec_config = snakemake.config["electricity"] + max_hours = elec_config["max_hours"] + costs.loc["battery"] = costs_for_storage( + costs.loc["battery storage"], + costs.loc["battery inverter"], + max_hours=max_hours["battery"], + ) + costs.loc["H2"] = costs_for_storage( + costs.loc["hydrogen storage"], + costs.loc["fuel cell"], + costs.loc["electrolysis"], + max_hours=max_hours["H2"], + ) + + for attr in ("marginal_cost", "capital_cost"): overwrites = config.get(attr) if overwrites is not None: overwrites = pd.Series(overwrites) @@ -191,26 +212,38 @@ def costs_for_storage(store, link1, link2=None, max_hours=1.): def load_powerplants(ppl_fn=None): if ppl_fn is None: ppl_fn = snakemake.input.powerplants - carrier_dict = {'ocgt': 'OCGT', 'ccgt': 'CCGT', 'bioenergy': 'biomass', - 'ccgt, thermal': 'CCGT', 'hard coal': 'coal'} - return (pd.read_csv(ppl_fn, index_col=0, dtype={'bus': 'str'}) - .powerplant.to_pypsa_names() - .rename(columns=str.lower).drop(columns=['efficiency']) - .replace({'carrier': carrier_dict})) + carrier_dict = { + "ocgt": "OCGT", + "ccgt": "CCGT", + "bioenergy": "biomass", + "ccgt, thermal": "CCGT", + "hard coal": "coal", + } + return ( + pd.read_csv(ppl_fn, index_col=0, dtype={"bus": "str"}) + .powerplant.to_pypsa_names() + .rename(columns=str.lower) + .drop(columns=["efficiency"]) + .replace({"carrier": carrier_dict}) + ) def attach_load(n): - substation_lv_i = n.buses.index[n.buses['substation_lv']] - regions = (gpd.read_file(snakemake.input.regions).set_index('name') - .reindex(substation_lv_i)) - opsd_load = (pd.read_csv(snakemake.input.load, index_col=0, parse_dates=True) - .filter(items=snakemake.config['countries'])) - - scaling = snakemake.config.get('load', {}).get('scaling_factor', 1.0) + substation_lv_i = n.buses.index[n.buses["substation_lv"]] + regions = ( + gpd.read_file(snakemake.input.regions) + .set_index("name") + .reindex(substation_lv_i) + ) + opsd_load = pd.read_csv(snakemake.input.load, index_col=0, parse_dates=True).filter( + items=snakemake.config["countries"] + ) + + scaling = snakemake.config.get("load", {}).get("scaling_factor", 1.0) logger.info(f"Load data scaled with scalling factor {scaling}.") opsd_load *= scaling - nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index('index') + nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index("index") def upsample(cntry, group): l = opsd_load[cntry] @@ -218,350 +251,471 @@ def upsample(cntry, group): return pd.DataFrame({group.index[0]: l}) else: nuts3_cntry = nuts3.loc[nuts3.country == cntry] - transfer = vtransfer.Shapes2Shapes(group, nuts3_cntry.geometry, - normed=False).T.tocsr() - gdp_n = pd.Series(transfer.dot(nuts3_cntry['gdp'].fillna(1.).values), - index=group.index) - pop_n = pd.Series(transfer.dot(nuts3_cntry['pop'].fillna(1.).values), - index=group.index) + transfer = vtransfer.Shapes2Shapes( + group, nuts3_cntry.geometry, normed=False + ).T.tocsr() + gdp_n = pd.Series( + transfer.dot(nuts3_cntry["gdp"].fillna(1.0).values), index=group.index + ) + 
pop_n = pd.Series( + transfer.dot(nuts3_cntry["pop"].fillna(1.0).values), index=group.index + ) # relative factors 0.6 and 0.4 have been determined from a linear # regression on the country to continent load data # (refer to vresutils.load._upsampling_weights) factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n)) - return pd.DataFrame(factors.values * l.values[:,np.newaxis], - index=l.index, columns=factors.index) - - load = pd.concat([upsample(cntry, group) for cntry, group - in regions.geometry.groupby(regions.country)], axis=1) + return pd.DataFrame( + factors.values * l.values[:, np.newaxis], + index=l.index, + columns=factors.index, + ) + + load = pd.concat( + [ + upsample(cntry, group) + for cntry, group in regions.geometry.groupby(regions.country) + ], + axis=1, + ) n.madd("Load", substation_lv_i, bus=substation_lv_i, p_set=load) def update_transmission_costs(n, costs, length_factor=1.0, simple_hvdc_costs=False): - n.lines['capital_cost'] = (n.lines['length'] * length_factor * - costs.at['HVAC overhead', 'capital_cost']) + n.lines["capital_cost"] = ( + n.lines["length"] * length_factor * costs.at["HVAC overhead", "capital_cost"] + ) - if n.links.empty: return + if n.links.empty: + return - dc_b = n.links.carrier == 'DC' + dc_b = n.links.carrier == "DC" # If there are no dc links, then the 'underwater_fraction' column # may be missing. Therefore we have to return here. - if n.links.loc[dc_b].empty: return + if n.links.loc[dc_b].empty: + return if simple_hvdc_costs: - costs = (n.links.loc[dc_b, 'length'] * length_factor * - costs.at['HVDC overhead', 'capital_cost']) + costs = ( + n.links.loc[dc_b, "length"] + * length_factor + * costs.at["HVDC overhead", "capital_cost"] + ) else: - costs = (n.links.loc[dc_b, 'length'] * length_factor * - ((1. - n.links.loc[dc_b, 'underwater_fraction']) * - costs.at['HVDC overhead', 'capital_cost'] + - n.links.loc[dc_b, 'underwater_fraction'] * - costs.at['HVDC submarine', 'capital_cost']) + - costs.at['HVDC inverter pair', 'capital_cost']) - n.links.loc[dc_b, 'capital_cost'] = costs + costs = ( + n.links.loc[dc_b, "length"] + * length_factor + * ( + (1.0 - n.links.loc[dc_b, "underwater_fraction"]) + * costs.at["HVDC overhead", "capital_cost"] + + n.links.loc[dc_b, "underwater_fraction"] + * costs.at["HVDC submarine", "capital_cost"] + ) + + costs.at["HVDC inverter pair", "capital_cost"] + ) + n.links.loc[dc_b, "capital_cost"] = costs def attach_wind_and_solar(n, costs): - for tech in snakemake.config['renewable']: - if tech == 'hydro': continue + for tech in snakemake.config["renewable"]: + if tech == "hydro": + continue n.add("Carrier", name=tech) - with xr.open_dataset(getattr(snakemake.input, 'profile_' + tech)) as ds: - if ds.indexes['bus'].empty: continue - - suptech = tech.split('-', 2)[0] - if suptech == 'offwind': - underwater_fraction = ds['underwater_fraction'].to_pandas() - connection_cost = (snakemake.config['lines']['length_factor'] * - ds['average_distance'].to_pandas() * - (underwater_fraction * - costs.at[tech + '-connection-submarine', 'capital_cost'] + - (1. 
- underwater_fraction) * - costs.at[tech + '-connection-underground', 'capital_cost'])) - capital_cost = (costs.at['offwind', 'capital_cost'] + - costs.at[tech + '-station', 'capital_cost'] + - connection_cost) - logger.info("Added connection cost of {:0.0f}-{:0.0f} Eur/MW/a to {}" - .format(connection_cost.min(), connection_cost.max(), tech)) + with xr.open_dataset(getattr(snakemake.input, "profile_" + tech)) as ds: + if ds.indexes["bus"].empty: + continue + + suptech = tech.split("-", 2)[0] + if suptech == "offwind": + underwater_fraction = ds["underwater_fraction"].to_pandas() + connection_cost = ( + snakemake.config["lines"]["length_factor"] + * ds["average_distance"].to_pandas() + * ( + underwater_fraction + * costs.at[tech + "-connection-submarine", "capital_cost"] + + (1.0 - underwater_fraction) + * costs.at[tech + "-connection-underground", "capital_cost"] + ) + ) + capital_cost = ( + costs.at["offwind", "capital_cost"] + + costs.at[tech + "-station", "capital_cost"] + + connection_cost + ) + logger.info( + "Added connection cost of {:0.0f}-{:0.0f} Eur/MW/a to {}".format( + connection_cost.min(), connection_cost.max(), tech + ) + ) else: - capital_cost = costs.at[tech, 'capital_cost'] + capital_cost = costs.at[tech, "capital_cost"] - n.madd("Generator", ds.indexes['bus'], ' ' + tech, - bus=ds.indexes['bus'], - carrier=tech, - p_nom_extendable=True, - p_nom_max=ds['p_nom_max'].to_pandas(), - weight=ds['weight'].to_pandas(), - marginal_cost=costs.at[suptech, 'marginal_cost'], - capital_cost=capital_cost, - efficiency=costs.at[suptech, 'efficiency'], - p_max_pu=ds['profile'].transpose('time', 'bus').to_pandas()) + n.madd( + "Generator", + ds.indexes["bus"], + " " + tech, + bus=ds.indexes["bus"], + carrier=tech, + p_nom_extendable=True, + p_nom_max=ds["p_nom_max"].to_pandas(), + weight=ds["weight"].to_pandas(), + marginal_cost=costs.at[suptech, "marginal_cost"], + capital_cost=capital_cost, + efficiency=costs.at[suptech, "efficiency"], + p_max_pu=ds["profile"].transpose("time", "bus").to_pandas(), + ) def attach_conventional_generators(n, costs, ppl): - carriers = snakemake.config['electricity']['conventional_carriers'] + carriers = snakemake.config["electricity"]["conventional_carriers"] _add_missing_carriers_from_costs(n, costs, carriers) - ppl = (ppl.query('carrier in @carriers').join(costs, on='carrier') - .rename(index=lambda s: 'C' + str(s))) + ppl = ( + ppl.query("carrier in @carriers") + .join(costs, on="carrier") + .rename(index=lambda s: "C" + str(s)) + ) - logger.info('Adding {} generators with capacities [MW] \n{}' - .format(len(ppl), ppl.groupby('carrier').p_nom.sum())) + logger.info( + "Adding {} generators with capacities [MW] \n{}".format( + len(ppl), ppl.groupby("carrier").p_nom.sum() + ) + ) - n.madd("Generator", ppl.index, - carrier=ppl.carrier, - bus=ppl.bus, - p_nom=ppl.p_nom, - efficiency=ppl.efficiency, - marginal_cost=ppl.marginal_cost, - capital_cost=0) + n.madd( + "Generator", + ppl.index, + carrier=ppl.carrier, + bus=ppl.bus, + p_nom=ppl.p_nom, + efficiency=ppl.efficiency, + marginal_cost=ppl.marginal_cost, + capital_cost=0, + ) - logger.warning(f'Capital costs for conventional generators put to 0 EUR/MW.') + logger.warning(f"Capital costs for conventional generators put to 0 EUR/MW.") def attach_hydro(n, costs, ppl): - if 'hydro' not in snakemake.config['renewable']: return - c = snakemake.config['renewable']['hydro'] - carriers = c.get('carriers', ['ror', 'PHS', 'hydro']) + if "hydro" not in snakemake.config["renewable"]: + return + c = 
snakemake.config["renewable"]["hydro"] + carriers = c.get("carriers", ["ror", "PHS", "hydro"]) _add_missing_carriers_from_costs(n, costs, carriers) - ppl = ppl.query('carrier == "hydro"').reset_index(drop=True)\ - .rename(index=lambda s: str(s) + ' hydro') + ppl = ( + ppl.query('carrier == "hydro"') + .reset_index(drop=True) + .rename(index=lambda s: str(s) + " hydro") + ) ror = ppl.query('technology == "Run-Of-River"') phs = ppl.query('technology == "Pumped Storage"') hydro = ppl.query('technology == "Reservoir"') - country = ppl['bus'].map(n.buses.country).rename("country") + country = ppl["bus"].map(n.buses.country).rename("country") inflow_idx = ror.index.union(hydro.index) if not inflow_idx.empty: - dist_key = ppl.loc[inflow_idx, 'p_nom'].groupby(country).transform(normed) + dist_key = ppl.loc[inflow_idx, "p_nom"].groupby(country).transform(normed) with xr.open_dataarray(snakemake.input.profile_hydro) as inflow: inflow_countries = pd.Index(country[inflow_idx]) - missing_c = (inflow_countries.unique() - .difference(inflow.indexes['countries'])) - assert missing_c.empty, (f"'{snakemake.input.profile_hydro}' is missing " - f"inflow time-series for at least one country: {', '.join(missing_c)}") - - inflow_t = (inflow.sel(countries=inflow_countries) - .rename({'countries': 'name'}) - .assign_coords(name=inflow_idx) - .transpose('time', 'name') - .to_pandas() - .multiply(dist_key, axis=1)) - - if 'ror' in carriers and not ror.empty: - n.madd("Generator", ror.index, - carrier='ror', - bus=ror['bus'], - p_nom=ror['p_nom'], - efficiency=costs.at['ror', 'efficiency'], - capital_cost=costs.at['ror', 'capital_cost'], - weight=ror['p_nom'], - p_max_pu=(inflow_t[ror.index] - .divide(ror['p_nom'], axis=1) - .where(lambda df: df<=1., other=1.))) - - if 'PHS' in carriers and not phs.empty: + missing_c = inflow_countries.unique().difference( + inflow.indexes["countries"] + ) + assert missing_c.empty, ( + f"'{snakemake.input.profile_hydro}' is missing " + f"inflow time-series for at least one country: {', '.join(missing_c)}" + ) + + inflow_t = ( + inflow.sel(countries=inflow_countries) + .rename({"countries": "name"}) + .assign_coords(name=inflow_idx) + .transpose("time", "name") + .to_pandas() + .multiply(dist_key, axis=1) + ) + + if "ror" in carriers and not ror.empty: + n.madd( + "Generator", + ror.index, + carrier="ror", + bus=ror["bus"], + p_nom=ror["p_nom"], + efficiency=costs.at["ror", "efficiency"], + capital_cost=costs.at["ror", "capital_cost"], + weight=ror["p_nom"], + p_max_pu=( + inflow_t[ror.index] + .divide(ror["p_nom"], axis=1) + .where(lambda df: df <= 1.0, other=1.0) + ), + ) + + if "PHS" in carriers and not phs.empty: # fill missing max hours to config value and # assume no natural inflow due to lack of data - phs = phs.replace({'max_hours': {0: c['PHS_max_hours']}}) - n.madd('StorageUnit', phs.index, - carrier='PHS', - bus=phs['bus'], - p_nom=phs['p_nom'], - capital_cost=costs.at['PHS', 'capital_cost'], - max_hours=phs['max_hours'], - efficiency_store=np.sqrt(costs.at['PHS','efficiency']), - efficiency_dispatch=np.sqrt(costs.at['PHS','efficiency']), - cyclic_state_of_charge=True) - - if 'hydro' in carriers and not hydro.empty: - hydro_max_hours = c.get('hydro_max_hours') - hydro_stats = pd.read_csv(snakemake.input.hydro_capacities, - comment="#", na_values='-', index_col=0) + phs = phs.replace({"max_hours": {0: c["PHS_max_hours"]}}) + n.madd( + "StorageUnit", + phs.index, + carrier="PHS", + bus=phs["bus"], + p_nom=phs["p_nom"], + capital_cost=costs.at["PHS", "capital_cost"], + 
max_hours=phs["max_hours"], + efficiency_store=np.sqrt(costs.at["PHS", "efficiency"]), + efficiency_dispatch=np.sqrt(costs.at["PHS", "efficiency"]), + cyclic_state_of_charge=True, + ) + + if "hydro" in carriers and not hydro.empty: + hydro_max_hours = c.get("hydro_max_hours") + hydro_stats = pd.read_csv( + snakemake.input.hydro_capacities, comment="#", na_values="-", index_col=0 + ) e_target = hydro_stats["E_store[TWh]"].clip(lower=0.2) * 1e6 - e_installed = hydro.eval('p_nom * max_hours').groupby(hydro.country).sum() + e_installed = hydro.eval("p_nom * max_hours").groupby(hydro.country).sum() e_missing = e_target - e_installed - missing_mh_i = hydro.query('max_hours == 0').index + missing_mh_i = hydro.query("max_hours == 0").index - if hydro_max_hours == 'energy_capacity_totals_by_country': + if hydro_max_hours == "energy_capacity_totals_by_country": # watch out some p_nom values like IE's are totally underrepresented - max_hours_country = e_missing / \ - hydro.loc[missing_mh_i].groupby('country').p_nom.sum() - - elif hydro_max_hours == 'estimate_by_large_installations': - max_hours_country = hydro_stats['E_store[TWh]'] * 1e3 / \ - hydro_stats['p_nom_discharge[GW]'] - - missing_countries = (pd.Index(hydro['country'].unique()) - .difference(max_hours_country.dropna().index)) + max_hours_country = ( + e_missing / hydro.loc[missing_mh_i].groupby("country").p_nom.sum() + ) + + elif hydro_max_hours == "estimate_by_large_installations": + max_hours_country = ( + hydro_stats["E_store[TWh]"] * 1e3 / hydro_stats["p_nom_discharge[GW]"] + ) + + missing_countries = pd.Index(hydro["country"].unique()).difference( + max_hours_country.dropna().index + ) if not missing_countries.empty: - logger.warning("Assuming max_hours=6 for hydro reservoirs in the countries: {}" - .format(", ".join(missing_countries))) - hydro_max_hours = hydro.max_hours.where(hydro.max_hours > 0, - hydro.country.map(max_hours_country)).fillna(6) - - n.madd('StorageUnit', hydro.index, carrier='hydro', - bus=hydro['bus'], - p_nom=hydro['p_nom'], - max_hours=hydro_max_hours, - capital_cost=(costs.at['hydro', 'capital_cost'] - if c.get('hydro_capital_cost') else 0.), - marginal_cost=costs.at['hydro', 'marginal_cost'], - p_max_pu=1., # dispatch - p_min_pu=0., # store - efficiency_dispatch=costs.at['hydro', 'efficiency'], - efficiency_store=0., - cyclic_state_of_charge=True, - inflow=inflow_t.loc[:, hydro.index]) + logger.warning( + "Assuming max_hours=6 for hydro reservoirs in the countries: {}".format( + ", ".join(missing_countries) + ) + ) + hydro_max_hours = hydro.max_hours.where( + hydro.max_hours > 0, hydro.country.map(max_hours_country) + ).fillna(6) + + n.madd( + "StorageUnit", + hydro.index, + carrier="hydro", + bus=hydro["bus"], + p_nom=hydro["p_nom"], + max_hours=hydro_max_hours, + capital_cost=( + costs.at["hydro", "capital_cost"] + if c.get("hydro_capital_cost") + else 0.0 + ), + marginal_cost=costs.at["hydro", "marginal_cost"], + p_max_pu=1.0, # dispatch + p_min_pu=0.0, # store + efficiency_dispatch=costs.at["hydro", "efficiency"], + efficiency_store=0.0, + cyclic_state_of_charge=True, + inflow=inflow_t.loc[:, hydro.index], + ) def attach_extendable_generators(n, costs, ppl): - elec_opts = snakemake.config['electricity'] - carriers = pd.Index(elec_opts['extendable_carriers']['Generator']) + elec_opts = snakemake.config["electricity"] + carriers = pd.Index(elec_opts["extendable_carriers"]["Generator"]) _add_missing_carriers_from_costs(n, costs, carriers) for tech in carriers: - if tech.startswith('OCGT'): - ocgt = 
ppl.query("carrier in ['OCGT', 'CCGT']").groupby('bus', as_index=False).first() - n.madd('Generator', ocgt.index, - suffix=' OCGT', - bus=ocgt['bus'], - carrier=tech, - p_nom_extendable=True, - p_nom=0., - capital_cost=costs.at['OCGT', 'capital_cost'], - marginal_cost=costs.at['OCGT', 'marginal_cost'], - efficiency=costs.at['OCGT', 'efficiency']) - - elif tech.startswith('CCGT'): - ccgt = ppl.query("carrier in ['OCGT', 'CCGT']").groupby('bus', as_index=False).first() - n.madd('Generator', ccgt.index, - suffix=' CCGT', - bus=ccgt['bus'], - carrier=tech, - p_nom_extendable=True, - p_nom=0., - capital_cost=costs.at['CCGT', 'capital_cost'], - marginal_cost=costs.at['CCGT', 'marginal_cost'], - efficiency=costs.at['CCGT', 'efficiency']) - - elif tech.startswith('nuclear'): - nuclear = ppl.query("carrier == 'nuclear'").groupby('bus', as_index=False).first() - n.madd('Generator', nuclear.index, - suffix=' nuclear', - bus=nuclear['bus'], + if tech.startswith("OCGT"): + ocgt = ( + ppl.query("carrier in ['OCGT', 'CCGT']") + .groupby("bus", as_index=False) + .first() + ) + n.madd( + "Generator", + ocgt.index, + suffix=" OCGT", + bus=ocgt["bus"], + carrier=tech, + p_nom_extendable=True, + p_nom=0.0, + capital_cost=costs.at["OCGT", "capital_cost"], + marginal_cost=costs.at["OCGT", "marginal_cost"], + efficiency=costs.at["OCGT", "efficiency"], + ) + + elif tech.startswith("CCGT"): + ccgt = ( + ppl.query("carrier in ['OCGT', 'CCGT']") + .groupby("bus", as_index=False) + .first() + ) + n.madd( + "Generator", + ccgt.index, + suffix=" CCGT", + bus=ccgt["bus"], + carrier=tech, + p_nom_extendable=True, + p_nom=0.0, + capital_cost=costs.at["CCGT", "capital_cost"], + marginal_cost=costs.at["CCGT", "marginal_cost"], + efficiency=costs.at["CCGT", "efficiency"], + ) + + elif tech.startswith("nuclear"): + nuclear = ( + ppl.query("carrier == 'nuclear'").groupby("bus", as_index=False).first() + ) + n.madd( + "Generator", + nuclear.index, + suffix=" nuclear", + bus=nuclear["bus"], carrier=tech, p_nom_extendable=True, - p_nom=0., - capital_cost=costs.at['nuclear', 'capital_cost'], - marginal_cost=costs.at['nuclear', 'marginal_cost'], - efficiency=costs.at['nuclear', 'efficiency']) + p_nom=0.0, + capital_cost=costs.at["nuclear", "capital_cost"], + marginal_cost=costs.at["nuclear", "marginal_cost"], + efficiency=costs.at["nuclear", "efficiency"], + ) else: - raise NotImplementedError(f"Adding extendable generators for carrier " - "'{tech}' is not implemented, yet. " - "Only OCGT, CCGT and nuclear are allowed at the moment.") - + raise NotImplementedError( + f"Adding extendable generators for carrier " + "'{tech}' is not implemented, yet. " + "Only OCGT, CCGT and nuclear are allowed at the moment." 
+ ) def attach_OPSD_renewables(n): - available = ['DE', 'FR', 'PL', 'CH', 'DK', 'CZ', 'SE', 'GB'] - tech_map = {'Onshore': 'onwind', 'Offshore': 'offwind', 'Solar': 'solar'} + available = ["DE", "FR", "PL", "CH", "DK", "CZ", "SE", "GB"] + tech_map = {"Onshore": "onwind", "Offshore": "offwind", "Solar": "solar"} countries = set(available) & set(n.buses.country) - techs = snakemake.config['electricity'].get('renewable_capacities_from_OPSD', []) + techs = snakemake.config["electricity"].get("renewable_capacities_from_OPSD", []) tech_map = {k: v for k, v in tech_map.items() if v in techs} if not tech_map: return - logger.info(f'Using OPSD renewable capacities in {", ".join(countries)} ' - f'for technologies {", ".join(tech_map.values())}.') + logger.info( + f'Using OPSD renewable capacities in {", ".join(countries)} ' + f'for technologies {", ".join(tech_map.values())}.' + ) df = pd.concat([pm.data.OPSD_VRE_country(c) for c in countries]) - technology_b = ~df.Technology.isin(['Onshore', 'Offshore']) - df['Fueltype'] = df.Fueltype.where(technology_b, df.Technology) - df = df.query('Fueltype in @tech_map').powerplant.convert_country_to_alpha2() + technology_b = ~df.Technology.isin(["Onshore", "Offshore"]) + df["Fueltype"] = df.Fueltype.where(technology_b, df.Technology) + df = df.query("Fueltype in @tech_map").powerplant.convert_country_to_alpha2() for fueltype, carrier_like in tech_map.items(): gens = n.generators[lambda df: df.carrier.str.contains(carrier_like)] buses = n.buses.loc[gens.bus.unique()] - gens_per_bus = gens.groupby('bus').p_nom.count() + gens_per_bus = gens.groupby("bus").p_nom.count() - caps = map_country_bus(df.query('Fueltype == @fueltype'), buses) - caps = caps.groupby(['bus']).Capacity.sum() + caps = map_country_bus(df.query("Fueltype == @fueltype"), buses) + caps = caps.groupby(["bus"]).Capacity.sum() caps = caps / gens_per_bus.reindex(caps.index, fill_value=1) n.generators.p_nom.update(gens.bus.map(caps).dropna()) n.generators.p_nom_min.update(gens.bus.map(caps).dropna()) - def estimate_renewable_capacities(n, tech_map=None): if tech_map is None: - tech_map = (snakemake.config['electricity'] - .get('estimate_renewable_capacities_from_capacity_stats', {})) + tech_map = snakemake.config["electricity"].get( + "estimate_renewable_capacities_from_capacity_stats", {} + ) - if len(tech_map) == 0: return + if len(tech_map) == 0: + return - capacities = (pm.data.Capacity_stats().powerplant.convert_country_to_alpha2() - [lambda df: df.Energy_Source_Level_2] - .set_index(['Fueltype', 'Country']).sort_index()) + capacities = ( + pm.data.Capacity_stats() + .powerplant.convert_country_to_alpha2()[lambda df: df.Energy_Source_Level_2] + .set_index(["Fueltype", "Country"]) + .sort_index() + ) countries = n.buses.country.unique() - if len(countries) == 0: return + if len(countries) == 0: + return - logger.info('heuristics applied to distribute renewable capacities [MW] \n{}' - .format(capacities.query('Fueltype in @tech_map.keys() and Capacity >= 0.1') - .groupby('Country').agg({'Capacity': 'sum'}))) + logger.info( + "heuristics applied to distribute renewable capacities [MW] \n{}".format( + capacities.query("Fueltype in @tech_map.keys() and Capacity >= 0.1") + .groupby("Country") + .agg({"Capacity": "sum"}) + ) + ) for ppm_fueltype, techs in tech_map.items(): - tech_capacities = capacities.loc[ppm_fueltype, 'Capacity']\ - .reindex(countries, fill_value=0.) 
- #tech_i = n.generators.query('carrier in @techs').index - tech_i = (n.generators.query('carrier in @techs') - [n.generators.query('carrier in @techs') - .bus.map(n.buses.country).isin(countries)].index) - n.generators.loc[tech_i, 'p_nom'] = ( - (n.generators_t.p_max_pu[tech_i].mean() * - n.generators.loc[tech_i, 'p_nom_max']) # maximal yearly generation - .groupby(n.generators.bus.map(n.buses.country)) - .transform(lambda s: normed(s) * tech_capacities.at[s.name]) - .where(lambda s: s>0.1, 0.)) # only capacities above 100kW - n.generators.loc[tech_i, 'p_nom_min'] = n.generators.loc[tech_i, 'p_nom'] + tech_capacities = capacities.loc[ppm_fueltype, "Capacity"].reindex( + countries, fill_value=0.0 + ) + # tech_i = n.generators.query('carrier in @techs').index + tech_i = n.generators.query("carrier in @techs")[ + n.generators.query("carrier in @techs") + .bus.map(n.buses.country) + .isin(countries) + ].index + n.generators.loc[tech_i, "p_nom"] = ( + ( + n.generators_t.p_max_pu[tech_i].mean() + * n.generators.loc[tech_i, "p_nom_max"] + ) # maximal yearly generation + .groupby(n.generators.bus.map(n.buses.country)) + .transform(lambda s: normed(s) * tech_capacities.at[s.name]) + .where(lambda s: s > 0.1, 0.0) + ) # only capacities above 100kW + n.generators.loc[tech_i, "p_nom_min"] = n.generators.loc[tech_i, "p_nom"] def add_nice_carrier_names(n, config=None): - if config is None: config = snakemake.config + if config is None: + config = snakemake.config carrier_i = n.carriers.index - nice_names = (pd.Series(config['plotting']['nice_names']) - .reindex(carrier_i).fillna(carrier_i.to_series().str.title())) - n.carriers['nice_name'] = nice_names - colors = pd.Series(config['plotting']['tech_colors']).reindex(carrier_i) + nice_names = ( + pd.Series(config["plotting"]["nice_names"]) + .reindex(carrier_i) + .fillna(carrier_i.to_series().str.title()) + ) + n.carriers["nice_name"] = nice_names + colors = pd.Series(config["plotting"]["tech_colors"]).reindex(carrier_i) if colors.isna().any(): missing_i = list(colors.index[colors.isna()]) - logger.warning(f'tech_colors for carriers {missing_i} not defined ' - 'in config.') - n.carriers['color'] = colors + logger.warning( + f"tech_colors for carriers {missing_i} not defined " "in config." + ) + n.carriers["color"] = colors if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('add_electricity') + + snakemake = mock_snakemake("add_electricity") configure_logging(snakemake) n = pypsa.Network(snakemake.input.base_network) - Nyears = n.snapshot_weightings.objective.sum() / 8760. 
+ Nyears = n.snapshot_weightings.objective.sum() / 8760.0 costs = load_costs(Nyears) ppl = load_powerplants() diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index 88f7d35ca..e57ff04d6 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -56,8 +57,11 @@ import pandas as pd import numpy as np -from add_electricity import (load_costs, add_nice_carrier_names, - _add_missing_carriers_from_costs) +from add_electricity import ( + load_costs, + add_nice_carrier_names, + _add_missing_carriers_from_costs, +) idx = pd.IndexSlice @@ -65,9 +69,9 @@ def attach_storageunits(n, costs): - elec_opts = snakemake.config['electricity'] - carriers = elec_opts['extendable_carriers']['StorageUnit'] - max_hours = elec_opts['max_hours'] + elec_opts = snakemake.config["electricity"] + carriers = elec_opts["extendable_carriers"]["StorageUnit"] + max_hours = elec_opts["max_hours"] _add_missing_carriers_from_costs(n, costs, carriers) @@ -77,130 +81,168 @@ def attach_storageunits(n, costs): lookup_dispatch = {"H2": "fuel cell", "battery": "battery inverter"} for carrier in carriers: - n.madd("StorageUnit", buses_i, ' ' + carrier, - bus=buses_i, - carrier=carrier, - p_nom_extendable=True, - capital_cost=costs.at[carrier, 'capital_cost'], - marginal_cost=costs.at[carrier, 'marginal_cost'], - efficiency_store=costs.at[lookup_store[carrier], 'efficiency'], - efficiency_dispatch=costs.at[lookup_dispatch[carrier], 'efficiency'], - max_hours=max_hours[carrier], - cyclic_state_of_charge=True) + n.madd( + "StorageUnit", + buses_i, + " " + carrier, + bus=buses_i, + carrier=carrier, + p_nom_extendable=True, + capital_cost=costs.at[carrier, "capital_cost"], + marginal_cost=costs.at[carrier, "marginal_cost"], + efficiency_store=costs.at[lookup_store[carrier], "efficiency"], + efficiency_dispatch=costs.at[lookup_dispatch[carrier], "efficiency"], + max_hours=max_hours[carrier], + cyclic_state_of_charge=True, + ) def attach_stores(n, costs): - elec_opts = snakemake.config['electricity'] - carriers = elec_opts['extendable_carriers']['Store'] + elec_opts = snakemake.config["electricity"] + carriers = elec_opts["extendable_carriers"]["Store"] _add_missing_carriers_from_costs(n, costs, carriers) buses_i = n.buses.index - bus_sub_dict = {k: n.buses[k].values for k in ['x', 'y', 'country']} + bus_sub_dict = {k: n.buses[k].values for k in ["x", "y", "country"]} - if 'H2' in carriers: + if "H2" in carriers: h2_buses_i = n.madd("Bus", buses_i + " H2", carrier="H2", **bus_sub_dict) - n.madd("Store", h2_buses_i, - bus=h2_buses_i, - carrier='H2', - e_nom_extendable=True, - e_cyclic=True, - capital_cost=costs.at["hydrogen storage", "capital_cost"]) - - n.madd("Link", h2_buses_i + " Electrolysis", - bus0=buses_i, - bus1=h2_buses_i, - carrier='H2 electrolysis', - p_nom_extendable=True, - efficiency=costs.at["electrolysis", "efficiency"], - capital_cost=costs.at["electrolysis", "capital_cost"], - marginal_cost=costs.at["electrolysis", "marginal_cost"]) - - n.madd("Link", h2_buses_i + " Fuel Cell", - bus0=h2_buses_i, - bus1=buses_i, - carrier='H2 fuel cell', - p_nom_extendable=True, - efficiency=costs.at["fuel cell", "efficiency"], - #NB: fixed cost is per MWel - capital_cost=costs.at["fuel cell", "capital_cost"] * costs.at["fuel cell", "efficiency"], - marginal_cost=costs.at["fuel cell", "marginal_cost"]) - - if 'battery' in carriers: - b_buses_i = 
n.madd("Bus", buses_i + " battery", carrier="battery", **bus_sub_dict) - - n.madd("Store", b_buses_i, - bus=b_buses_i, - carrier='battery', - e_cyclic=True, - e_nom_extendable=True, - capital_cost=costs.at['battery storage', 'capital_cost'], - marginal_cost=costs.at["battery", "marginal_cost"]) - - n.madd("Link", b_buses_i + " charger", - bus0=buses_i, - bus1=b_buses_i, - carrier='battery charger', - efficiency=costs.at['battery inverter', 'efficiency'], - capital_cost=costs.at['battery inverter', 'capital_cost'], - p_nom_extendable=True, - marginal_cost=costs.at["battery inverter", "marginal_cost"]) - - n.madd("Link", b_buses_i + " discharger", - bus0=b_buses_i, - bus1=buses_i, - carrier='battery discharger', - efficiency=costs.at['battery inverter','efficiency'], - p_nom_extendable=True, - marginal_cost=costs.at["battery inverter", "marginal_cost"]) + n.madd( + "Store", + h2_buses_i, + bus=h2_buses_i, + carrier="H2", + e_nom_extendable=True, + e_cyclic=True, + capital_cost=costs.at["hydrogen storage", "capital_cost"], + ) + + n.madd( + "Link", + h2_buses_i + " Electrolysis", + bus0=buses_i, + bus1=h2_buses_i, + carrier="H2 electrolysis", + p_nom_extendable=True, + efficiency=costs.at["electrolysis", "efficiency"], + capital_cost=costs.at["electrolysis", "capital_cost"], + marginal_cost=costs.at["electrolysis", "marginal_cost"], + ) + + n.madd( + "Link", + h2_buses_i + " Fuel Cell", + bus0=h2_buses_i, + bus1=buses_i, + carrier="H2 fuel cell", + p_nom_extendable=True, + efficiency=costs.at["fuel cell", "efficiency"], + # NB: fixed cost is per MWel + capital_cost=costs.at["fuel cell", "capital_cost"] + * costs.at["fuel cell", "efficiency"], + marginal_cost=costs.at["fuel cell", "marginal_cost"], + ) + + if "battery" in carriers: + b_buses_i = n.madd( + "Bus", buses_i + " battery", carrier="battery", **bus_sub_dict + ) + + n.madd( + "Store", + b_buses_i, + bus=b_buses_i, + carrier="battery", + e_cyclic=True, + e_nom_extendable=True, + capital_cost=costs.at["battery storage", "capital_cost"], + marginal_cost=costs.at["battery", "marginal_cost"], + ) + + n.madd( + "Link", + b_buses_i + " charger", + bus0=buses_i, + bus1=b_buses_i, + carrier="battery charger", + efficiency=costs.at["battery inverter", "efficiency"], + capital_cost=costs.at["battery inverter", "capital_cost"], + p_nom_extendable=True, + marginal_cost=costs.at["battery inverter", "marginal_cost"], + ) + + n.madd( + "Link", + b_buses_i + " discharger", + bus0=b_buses_i, + bus1=buses_i, + carrier="battery discharger", + efficiency=costs.at["battery inverter", "efficiency"], + p_nom_extendable=True, + marginal_cost=costs.at["battery inverter", "marginal_cost"], + ) def attach_hydrogen_pipelines(n, costs): - elec_opts = snakemake.config['electricity'] - ext_carriers = elec_opts['extendable_carriers'] - as_stores = ext_carriers.get('Store', []) + elec_opts = snakemake.config["electricity"] + ext_carriers = elec_opts["extendable_carriers"] + as_stores = ext_carriers.get("Store", []) - if 'H2 pipeline' not in ext_carriers.get('Link',[]): return + if "H2 pipeline" not in ext_carriers.get("Link", []): + return - assert 'H2' in as_stores, ("Attaching hydrogen pipelines requires hydrogen " - "storage to be modelled as Store-Link-Bus combination. See " - "`config.yaml` at `electricity: extendable_carriers: Store:`.") + assert "H2" in as_stores, ( + "Attaching hydrogen pipelines requires hydrogen " + "storage to be modelled as Store-Link-Bus combination. See " + "`config.yaml` at `electricity: extendable_carriers: Store:`." 
+ ) # determine bus pairs - attrs = ["bus0","bus1","length"] - candidates = pd.concat([n.lines[attrs], n.links.query('carrier=="DC"')[attrs]])\ - .reset_index(drop=True) + attrs = ["bus0", "bus1", "length"] + candidates = pd.concat( + [n.lines[attrs], n.links.query('carrier=="DC"')[attrs]] + ).reset_index(drop=True) # remove bus pair duplicates regardless of order of bus0 and bus1 - h2_links = candidates[~pd.DataFrame(np.sort(candidates[['bus0', 'bus1']])).duplicated()] + h2_links = candidates[ + ~pd.DataFrame(np.sort(candidates[["bus0", "bus1"]])).duplicated() + ] h2_links.index = h2_links.apply(lambda c: f"H2 pipeline {c.bus0}-{c.bus1}", axis=1) # add pipelines - n.madd("Link", - h2_links.index, - bus0=h2_links.bus0.values + " H2", - bus1=h2_links.bus1.values + " H2", - p_min_pu=-1, - p_nom_extendable=True, - length=h2_links.length.values, - capital_cost=costs.at['H2 pipeline','capital_cost']*h2_links.length, - efficiency=costs.at['H2 pipeline','efficiency'], - carrier="H2 pipeline") + n.madd( + "Link", + h2_links.index, + bus0=h2_links.bus0.values + " H2", + bus1=h2_links.bus1.values + " H2", + p_min_pu=-1, + p_nom_extendable=True, + length=h2_links.length.values, + capital_cost=costs.at["H2 pipeline", "capital_cost"] * h2_links.length, + efficiency=costs.at["H2 pipeline", "efficiency"], + carrier="H2 pipeline", + ) if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('add_extra_components', network='elec', - simpl='', clusters=5) + + snakemake = mock_snakemake( + "add_extra_components", network="elec", simpl="", clusters=5 + ) configure_logging(snakemake) n = pypsa.Network(snakemake.input.network) - Nyears = n.snapshot_weightings.objective.sum() / 8760. 
- costs = load_costs(Nyears, tech_costs=snakemake.input.tech_costs, - config=snakemake.config['costs'], - elec_config=snakemake.config['electricity']) + Nyears = n.snapshot_weightings.objective.sum() / 8760.0 + costs = load_costs( + Nyears, + tech_costs=snakemake.input.tech_costs, + config=snakemake.config["costs"], + elec_config=snakemake.config["electricity"], + ) attach_storageunits(n, costs) attach_stores(n, costs) diff --git a/scripts/base_network.py b/scripts/base_network.py index 514e4dc3d..f1cf31170 100644 --- a/scripts/base_network.py +++ b/scripts/base_network.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -97,48 +98,73 @@ def _get_country(df): def _find_closest_links(links, new_links, distance_upper_bound=1.5): - treecoords = np.asarray([np.asarray(shapely.wkt.loads(s))[[0, -1]].flatten() - for s in links.geometry]) - querycoords = np.vstack([new_links[['x1', 'y1', 'x2', 'y2']], - new_links[['x2', 'y2', 'x1', 'y1']]]) + treecoords = np.asarray( + [np.asarray(shapely.wkt.loads(s))[[0, -1]].flatten() for s in links.geometry] + ) + querycoords = np.vstack( + [new_links[["x1", "y1", "x2", "y2"]], new_links[["x2", "y2", "x1", "y1"]]] + ) tree = spatial.KDTree(treecoords) dist, ind = tree.query(querycoords, distance_upper_bound=distance_upper_bound) found_b = ind < len(links) - found_i = np.arange(len(new_links)*2)[found_b] % len(new_links) - return pd.DataFrame(dict(D=dist[found_b], - i=links.index[ind[found_b] % len(links)]), - index=new_links.index[found_i]).sort_values(by='D')\ - [lambda ds: ~ds.index.duplicated(keep='first')]\ - .sort_index()['i'] + found_i = np.arange(len(new_links) * 2)[found_b] % len(new_links) + return ( + pd.DataFrame( + dict(D=dist[found_b], i=links.index[ind[found_b] % len(links)]), + index=new_links.index[found_i], + ) + .sort_values(by="D")[lambda ds: ~ds.index.duplicated(keep="first")] + .sort_index()["i"] + ) def _load_buses_from_eg(): - buses = (pd.read_csv(snakemake.input.eg_buses, quotechar="'", - true_values=['t'], false_values=['f'], - dtype=dict(bus_id="str")) - .set_index("bus_id") - .drop(['station_id'], axis=1) - .rename(columns=dict(voltage='v_nom'))) + buses = ( + pd.read_csv( + snakemake.input.eg_buses, + quotechar="'", + true_values=["t"], + false_values=["f"], + dtype=dict(bus_id="str"), + ) + .set_index("bus_id") + .drop(["station_id"], axis=1) + .rename(columns=dict(voltage="v_nom")) + ) - buses['carrier'] = buses.pop('dc').map({True: 'DC', False: 'AC'}) - buses['under_construction'] = buses['under_construction'].fillna(False).astype(bool) + buses["carrier"] = buses.pop("dc").map({True: "DC", False: "AC"}) + buses["under_construction"] = buses["under_construction"].fillna(False).astype(bool) # remove all buses outside of all countries including exclusive economic zones (offshore) - europe_shape = gpd.read_file(snakemake.input.europe_shape).loc[0, 'geometry'] + europe_shape = gpd.read_file(snakemake.input.europe_shape).loc[0, "geometry"] europe_shape_prepped = shapely.prepared.prep(europe_shape) - buses_in_europe_b = buses[['x', 'y']].apply(lambda p: europe_shape_prepped.contains(Point(p)), axis=1) + buses_in_europe_b = buses[["x", "y"]].apply( + lambda p: europe_shape_prepped.contains(Point(p)), axis=1 + ) - buses_with_v_nom_to_keep_b = buses.v_nom.isin(snakemake.config['electricity']['voltages']) | buses.v_nom.isnull() - logger.info("Removing buses with voltages 
{}".format(pd.Index(buses.v_nom.unique()).dropna().difference(snakemake.config['electricity']['voltages']))) + buses_with_v_nom_to_keep_b = ( + buses.v_nom.isin(snakemake.config["electricity"]["voltages"]) + | buses.v_nom.isnull() + ) + logger.info( + "Removing buses with voltages {}".format( + pd.Index(buses.v_nom.unique()) + .dropna() + .difference(snakemake.config["electricity"]["voltages"]) + ) + ) return pd.DataFrame(buses.loc[buses_in_europe_b & buses_with_v_nom_to_keep_b]) def _load_transformers_from_eg(buses): - transformers = (pd.read_csv(snakemake.input.eg_transformers, quotechar="'", - true_values=['t'], false_values=['f'], - dtype=dict(transformer_id='str', bus0='str', bus1='str')) - .set_index('transformer_id')) + transformers = pd.read_csv( + snakemake.input.eg_transformers, + quotechar="'", + true_values=["t"], + false_values=["f"], + dtype=dict(transformer_id="str", bus0="str", bus1="str"), + ).set_index("transformer_id") transformers = _remove_dangling_branches(transformers, buses) @@ -146,32 +172,39 @@ def _load_transformers_from_eg(buses): def _load_converters_from_eg(buses): - converters = (pd.read_csv(snakemake.input.eg_converters, quotechar="'", - true_values=['t'], false_values=['f'], - dtype=dict(converter_id='str', bus0='str', bus1='str')) - .set_index('converter_id')) + converters = pd.read_csv( + snakemake.input.eg_converters, + quotechar="'", + true_values=["t"], + false_values=["f"], + dtype=dict(converter_id="str", bus0="str", bus1="str"), + ).set_index("converter_id") converters = _remove_dangling_branches(converters, buses) - converters['carrier'] = 'B2B' + converters["carrier"] = "B2B" return converters def _load_links_from_eg(buses): - links = (pd.read_csv(snakemake.input.eg_links, quotechar="'", true_values=['t'], false_values=['f'], - dtype=dict(link_id='str', bus0='str', bus1='str', under_construction="bool")) - .set_index('link_id')) + links = pd.read_csv( + snakemake.input.eg_links, + quotechar="'", + true_values=["t"], + false_values=["f"], + dtype=dict(link_id="str", bus0="str", bus1="str", under_construction="bool"), + ).set_index("link_id") - links['length'] /= 1e3 + links["length"] /= 1e3 # hotfix - links.loc[links.bus1=='6271', 'bus1'] = '6273' + links.loc[links.bus1 == "6271", "bus1"] = "6273" links = _remove_dangling_branches(links, buses) # Add DC line parameters - links['carrier'] = 'DC' + links["carrier"] = "DC" return links @@ -180,15 +213,21 @@ def _add_links_from_tyndp(buses, links): links_tyndp = pd.read_csv(snakemake.input.links_tyndp) # remove all links from list which lie outside all of the desired countries - europe_shape = gpd.read_file(snakemake.input.europe_shape).loc[0, 'geometry'] + europe_shape = gpd.read_file(snakemake.input.europe_shape).loc[0, "geometry"] europe_shape_prepped = shapely.prepared.prep(europe_shape) - x1y1_in_europe_b = links_tyndp[['x1', 'y1']].apply(lambda p: europe_shape_prepped.contains(Point(p)), axis=1) - x2y2_in_europe_b = links_tyndp[['x2', 'y2']].apply(lambda p: europe_shape_prepped.contains(Point(p)), axis=1) + x1y1_in_europe_b = links_tyndp[["x1", "y1"]].apply( + lambda p: europe_shape_prepped.contains(Point(p)), axis=1 + ) + x2y2_in_europe_b = links_tyndp[["x2", "y2"]].apply( + lambda p: europe_shape_prepped.contains(Point(p)), axis=1 + ) is_within_covered_countries_b = x1y1_in_europe_b & x2y2_in_europe_b if not is_within_covered_countries_b.all(): - logger.info("TYNDP links outside of the covered area (skipping): " + - ", ".join(links_tyndp.loc[~ is_within_covered_countries_b, "Name"])) + 
logger.info( + "TYNDP links outside of the covered area (skipping): " + + ", ".join(links_tyndp.loc[~is_within_covered_countries_b, "Name"]) + ) links_tyndp = links_tyndp.loc[is_within_covered_countries_b] if links_tyndp.empty: @@ -196,25 +235,32 @@ def _add_links_from_tyndp(buses, links): has_replaces_b = links_tyndp.replaces.notnull() oids = dict(Bus=_get_oid(buses), Link=_get_oid(links)) - keep_b = dict(Bus=pd.Series(True, index=buses.index), - Link=pd.Series(True, index=links.index)) - for reps in links_tyndp.loc[has_replaces_b, 'replaces']: - for comps in reps.split(':'): - oids_to_remove = comps.split('.') + keep_b = dict( + Bus=pd.Series(True, index=buses.index), Link=pd.Series(True, index=links.index) + ) + for reps in links_tyndp.loc[has_replaces_b, "replaces"]: + for comps in reps.split(":"): + oids_to_remove = comps.split(".") c = oids_to_remove.pop(0) keep_b[c] &= ~oids[c].isin(oids_to_remove) - buses = buses.loc[keep_b['Bus']] - links = links.loc[keep_b['Link']] + buses = buses.loc[keep_b["Bus"]] + links = links.loc[keep_b["Link"]] - links_tyndp["j"] = _find_closest_links(links, links_tyndp, distance_upper_bound=0.20) + links_tyndp["j"] = _find_closest_links( + links, links_tyndp, distance_upper_bound=0.20 + ) # Corresponds approximately to 20km tolerances if links_tyndp["j"].notnull().any(): - logger.info("TYNDP links already in the dataset (skipping): " + ", ".join(links_tyndp.loc[links_tyndp["j"].notnull(), "Name"])) + logger.info( + "TYNDP links already in the dataset (skipping): " + + ", ".join(links_tyndp.loc[links_tyndp["j"].notnull(), "Name"]) + ) links_tyndp = links_tyndp.loc[links_tyndp["j"].isnull()] - if links_tyndp.empty: return buses, links + if links_tyndp.empty: + return buses, links - tree = spatial.KDTree(buses[['x', 'y']]) + tree = spatial.KDTree(buses[["x", "y"]]) _, ind0 = tree.query(links_tyndp[["x1", "y1"]]) ind0_b = ind0 < len(buses) links_tyndp.loc[ind0_b, "bus0"] = buses.index[ind0[ind0_b]] @@ -223,24 +269,42 @@ def _add_links_from_tyndp(buses, links): ind1_b = ind1 < len(buses) links_tyndp.loc[ind1_b, "bus1"] = buses.index[ind1[ind1_b]] - links_tyndp_located_b = links_tyndp["bus0"].notnull() & links_tyndp["bus1"].notnull() + links_tyndp_located_b = ( + links_tyndp["bus0"].notnull() & links_tyndp["bus1"].notnull() + ) if not links_tyndp_located_b.all(): - logger.warning("Did not find connected buses for TYNDP links (skipping): " + ", ".join(links_tyndp.loc[~links_tyndp_located_b, "Name"])) + logger.warning( + "Did not find connected buses for TYNDP links (skipping): " + + ", ".join(links_tyndp.loc[~links_tyndp_located_b, "Name"]) + ) links_tyndp = links_tyndp.loc[links_tyndp_located_b] logger.info("Adding the following TYNDP links: " + ", ".join(links_tyndp["Name"])) links_tyndp = links_tyndp[["bus0", "bus1"]].assign( - carrier='DC', + carrier="DC", p_nom=links_tyndp["Power (MW)"], - length=links_tyndp["Length (given) (km)"].fillna(links_tyndp["Length (distance*1.2) (km)"]), + length=links_tyndp["Length (given) (km)"].fillna( + links_tyndp["Length (distance*1.2) (km)"] + ), under_construction=True, underground=False, - geometry=(links_tyndp[["x1", "y1", "x2", "y2"]] - .apply(lambda s: str(LineString([[s.x1, s.y1], [s.x2, s.y2]])), axis=1)), - tags=('"name"=>"' + links_tyndp["Name"] + '", ' + - '"ref"=>"' + links_tyndp["Ref"] + '", ' + - '"status"=>"' + links_tyndp["status"] + '"') + geometry=( + links_tyndp[["x1", "y1", "x2", "y2"]].apply( + lambda s: str(LineString([[s.x1, s.y1], [s.x2, s.y2]])), axis=1 + ) + ), + tags=( + '"name"=>"' + + 
links_tyndp["Name"] + + '", ' + + '"ref"=>"' + + links_tyndp["Ref"] + + '", ' + + '"status"=>"' + + links_tyndp["status"] + + '"' + ), ) links_tyndp.index = "T" + links_tyndp.index.astype(str) @@ -249,13 +313,25 @@ def _add_links_from_tyndp(buses, links): def _load_lines_from_eg(buses): - lines = (pd.read_csv(snakemake.input.eg_lines, quotechar="'", true_values=['t'], false_values=['f'], - dtype=dict(line_id='str', bus0='str', bus1='str', - underground="bool", under_construction="bool")) - .set_index('line_id') - .rename(columns=dict(voltage='v_nom', circuits='num_parallel'))) + lines = ( + pd.read_csv( + snakemake.input.eg_lines, + quotechar="'", + true_values=["t"], + false_values=["f"], + dtype=dict( + line_id="str", + bus0="str", + bus1="str", + underground="bool", + under_construction="bool", + ), + ) + .set_index("line_id") + .rename(columns=dict(voltage="v_nom", circuits="num_parallel")) + ) - lines['length'] /= 1e3 + lines["length"] /= 1e3 lines = _remove_dangling_branches(lines, buses) @@ -266,18 +342,20 @@ def _apply_parameter_corrections(n): with open(snakemake.input.parameter_corrections) as f: corrections = yaml.safe_load(f) - if corrections is None: return + if corrections is None: + return for component, attrs in corrections.items(): df = n.df(component) oid = _get_oid(df) - if attrs is None: continue + if attrs is None: + continue for attr, repls in attrs.items(): for i, r in repls.items(): - if i == 'oid': + if i == "oid": r = oid.map(repls["oid"]).dropna() - elif i == 'index': + elif i == "index": r = pd.Series(repls["index"]) else: raise NotImplementedError() @@ -286,78 +364,87 @@ def _apply_parameter_corrections(n): def _set_electrical_parameters_lines(lines): - v_noms = snakemake.config['electricity']['voltages'] - linetypes = snakemake.config['lines']['types'] + v_noms = snakemake.config["electricity"]["voltages"] + linetypes = snakemake.config["lines"]["types"] for v_nom in v_noms: - lines.loc[lines["v_nom"] == v_nom, 'type'] = linetypes[v_nom] + lines.loc[lines["v_nom"] == v_nom, "type"] = linetypes[v_nom] - lines['s_max_pu'] = snakemake.config['lines']['s_max_pu'] + lines["s_max_pu"] = snakemake.config["lines"]["s_max_pu"] return lines def _set_lines_s_nom_from_linetypes(n): - n.lines['s_nom'] = ( - np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) * - n.lines['v_nom'] * n.lines.num_parallel + n.lines["s_nom"] = ( + np.sqrt(3) + * n.lines["type"].map(n.line_types.i_nom) + * n.lines["v_nom"] + * n.lines.num_parallel ) def _set_electrical_parameters_links(links): - if links.empty: return links + if links.empty: + return links - p_max_pu = snakemake.config['links'].get('p_max_pu', 1.) 
- links['p_max_pu'] = p_max_pu - links['p_min_pu'] = -p_max_pu + p_max_pu = snakemake.config["links"].get("p_max_pu", 1.0) + links["p_max_pu"] = p_max_pu + links["p_min_pu"] = -p_max_pu links_p_nom = pd.read_csv(snakemake.input.links_p_nom) # filter links that are not in operation anymore - removed_b = links_p_nom.Remarks.str.contains('Shut down|Replaced', na=False) + removed_b = links_p_nom.Remarks.str.contains("Shut down|Replaced", na=False) links_p_nom = links_p_nom[~removed_b] # find closest link for all links in links_p_nom - links_p_nom['j'] = _find_closest_links(links, links_p_nom) + links_p_nom["j"] = _find_closest_links(links, links_p_nom) - links_p_nom = links_p_nom.groupby(['j'],as_index=False).agg({'Power (MW)': 'sum'}) + links_p_nom = links_p_nom.groupby(["j"], as_index=False).agg({"Power (MW)": "sum"}) p_nom = links_p_nom.dropna(subset=["j"]).set_index("j")["Power (MW)"] # Don't update p_nom if it's already set - p_nom_unset = p_nom.drop(links.index[links.p_nom.notnull()], errors='ignore') if "p_nom" in links else p_nom + p_nom_unset = ( + p_nom.drop(links.index[links.p_nom.notnull()], errors="ignore") + if "p_nom" in links + else p_nom + ) links.loc[p_nom_unset.index, "p_nom"] = p_nom_unset return links def _set_electrical_parameters_converters(converters): - p_max_pu = snakemake.config['links'].get('p_max_pu', 1.) - converters['p_max_pu'] = p_max_pu - converters['p_min_pu'] = -p_max_pu + p_max_pu = snakemake.config["links"].get("p_max_pu", 1.0) + converters["p_max_pu"] = p_max_pu + converters["p_min_pu"] = -p_max_pu - converters['p_nom'] = 2000 + converters["p_nom"] = 2000 # Converters are combined with links - converters['under_construction'] = False - converters['underground'] = False + converters["under_construction"] = False + converters["underground"] = False return converters def _set_electrical_parameters_transformers(transformers): - config = snakemake.config['transformers'] + config = snakemake.config["transformers"] ## Add transformer parameters - transformers["x"] = config.get('x', 0.1) - transformers["s_nom"] = config.get('s_nom', 2000) - transformers['type'] = config.get('type', '') + transformers["x"] = config.get("x", 0.1) + transformers["s_nom"] = config.get("s_nom", 2000) + transformers["type"] = config.get("type", "") return transformers def _remove_dangling_branches(branches, buses): - return pd.DataFrame(branches.loc[branches.bus0.isin(buses.index) & branches.bus1.isin(buses.index)]) + return pd.DataFrame( + branches.loc[branches.bus0.isin(buses.index) & branches.bus1.isin(buses.index)] + ) def _remove_unconnected_components(network): @@ -367,8 +454,13 @@ def _remove_unconnected_components(network): component_sizes = component.value_counts() components_to_remove = component_sizes.iloc[1:] - logger.info("Removing {} unconnected network components with less than {} buses. In total {} buses." - .format(len(components_to_remove), components_to_remove.max(), components_to_remove.sum())) + logger.info( + "Removing {} unconnected network components with less than {} buses. 
In total {} buses.".format( + len(components_to_remove), + components_to_remove.max(), + components_to_remove.sum(), + ) + ) return network[component == component_sizes.index[0]] @@ -380,31 +472,45 @@ def _set_countries_and_substations(n): def buses_in_shape(shape): shape = shapely.prepared.prep(shape) return pd.Series( - np.fromiter((shape.contains(Point(x, y)) - for x, y in buses.loc[:,["x", "y"]].values), - dtype=bool, count=len(buses)), - index=buses.index + np.fromiter( + ( + shape.contains(Point(x, y)) + for x, y in buses.loc[:, ["x", "y"]].values + ), + dtype=bool, + count=len(buses), + ), + index=buses.index, ) - countries = snakemake.config['countries'] - country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index('name')['geometry'] - offshore_shapes = gpd.read_file(snakemake.input.offshore_shapes).set_index('name')['geometry'] - substation_b = buses['symbol'].str.contains('substation|converter station', case=False) + countries = snakemake.config["countries"] + country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index("name")[ + "geometry" + ] + offshore_shapes = gpd.read_file(snakemake.input.offshore_shapes).set_index("name")[ + "geometry" + ] + substation_b = buses["symbol"].str.contains( + "substation|converter station", case=False + ) def prefer_voltage(x, which): index = x.index if len(index) == 1: return pd.Series(index, index) - key = (x.index[0] - if x['v_nom'].isnull().all() - else getattr(x['v_nom'], 'idx' + which)()) + key = ( + x.index[0] + if x["v_nom"].isnull().all() + else getattr(x["v_nom"], "idx" + which)() + ) return pd.Series(key, index) - gb = buses.loc[substation_b].groupby(['x', 'y'], as_index=False, - group_keys=False, sort=False) - bus_map_low = gb.apply(prefer_voltage, 'min') + gb = buses.loc[substation_b].groupby( + ["x", "y"], as_index=False, group_keys=False, sort=False + ) + bus_map_low = gb.apply(prefer_voltage, "min") lv_b = (bus_map_low == bus_map_low.index).reindex(buses.index, fill_value=False) - bus_map_high = gb.apply(prefer_voltage, 'max') + bus_map_high = gb.apply(prefer_voltage, "max") hv_b = (bus_map_high == bus_map_high.index).reindex(buses.index, fill_value=False) onshore_b = pd.Series(False, buses.index) @@ -415,47 +521,66 @@ def prefer_voltage(x, which): onshore_country_b = buses_in_shape(onshore_shape) onshore_b |= onshore_country_b - buses.loc[onshore_country_b, 'country'] = country + buses.loc[onshore_country_b, "country"] = country - if country not in offshore_shapes.index: continue + if country not in offshore_shapes.index: + continue offshore_country_b = buses_in_shape(offshore_shapes[country]) offshore_b |= offshore_country_b - buses.loc[offshore_country_b, 'country'] = country + buses.loc[offshore_country_b, "country"] = country # Only accept buses as low-voltage substations (where load is attached), if # they have at least one connection which is not under_construction has_connections_b = pd.Series(False, index=buses.index) - for b, df in product(('bus0', 'bus1'), (n.lines, n.links)): - has_connections_b |= ~ df.groupby(b).under_construction.min() + for b, df in product(("bus0", "bus1"), (n.lines, n.links)): + has_connections_b |= ~df.groupby(b).under_construction.min() - buses['substation_lv'] = lv_b & onshore_b & (~ buses['under_construction']) & has_connections_b - buses['substation_off'] = (offshore_b | (hv_b & onshore_b)) & (~ buses['under_construction']) + buses["substation_lv"] = ( + lv_b & onshore_b & (~buses["under_construction"]) & has_connections_b + ) + buses["substation_off"] = 
(offshore_b | (hv_b & onshore_b)) & ( + ~buses["under_construction"] + ) c_nan_b = buses.country.isnull() if c_nan_b.sum() > 0: c_tag = _get_country(buses.loc[c_nan_b]) c_tag.loc[~c_tag.isin(countries)] = np.nan - n.buses.loc[c_nan_b, 'country'] = c_tag + n.buses.loc[c_nan_b, "country"] = c_tag c_tag_nan_b = n.buses.country.isnull() # Nearest country in path length defines country of still homeless buses # Work-around until commit 705119 lands in pypsa release - n.transformers['length'] = 0. - graph = n.graph(weight='length') - n.transformers.drop('length', axis=1, inplace=True) + n.transformers["length"] = 0.0 + graph = n.graph(weight="length") + n.transformers.drop("length", axis=1, inplace=True) for b in n.buses.index[c_tag_nan_b]: - df = (pd.DataFrame(dict(pathlength=nx.single_source_dijkstra_path_length(graph, b, cutoff=200))) - .join(n.buses.country).dropna()) - assert not df.empty, "No buses with defined country within 200km of bus `{}`".format(b) - n.buses.at[b, 'country'] = df.loc[df.pathlength.idxmin(), 'country'] - - logger.warning("{} buses are not in any country or offshore shape," - " {} have been assigned from the tag of the entsoe map," - " the rest from the next bus in terms of pathlength." - .format(c_nan_b.sum(), c_nan_b.sum() - c_tag_nan_b.sum())) + df = ( + pd.DataFrame( + dict( + pathlength=nx.single_source_dijkstra_path_length( + graph, b, cutoff=200 + ) + ) + ) + .join(n.buses.country) + .dropna() + ) + assert ( + not df.empty + ), "No buses with defined country within 200km of bus `{}`".format(b) + n.buses.at[b, "country"] = df.loc[df.pathlength.idxmin(), "country"] + + logger.warning( + "{} buses are not in any country or offshore shape," + " {} have been assigned from the tag of the entsoe map," + " the rest from the next bus in terms of pathlength.".format( + c_nan_b.sum(), c_nan_b.sum() - c_tag_nan_b.sum() + ) + ) return buses @@ -464,11 +589,13 @@ def _replace_b2b_converter_at_country_border_by_link(n): # Affects only the B2B converter in Lithuania at the Polish border at the moment buscntry = n.buses.country linkcntry = n.links.bus0.map(buscntry) - converters_i = n.links.index[(n.links.carrier == 'B2B') & (linkcntry == n.links.bus1.map(buscntry))] + converters_i = n.links.index[ + (n.links.carrier == "B2B") & (linkcntry == n.links.bus1.map(buscntry)) + ] def findforeignbus(G, i): cntry = linkcntry.at[i] - for busattr in ('bus0', 'bus1'): + for busattr in ("bus0", "bus1"): b0 = n.links.at[i, busattr] for b1 in G[b0]: if buscntry[b1] != cntry: @@ -481,53 +608,68 @@ def findforeignbus(G, i): if busattr is not None: comp, line = next(iter(G[b0][b1])) if comp != "Line": - logger.warning("Unable to replace B2B `{}` expected a Line, but found a {}" - .format(i, comp)) + logger.warning( + "Unable to replace B2B `{}` expected a Line, but found a {}".format( + i, comp + ) + ) continue n.links.at[i, busattr] = b1 - n.links.at[i, 'p_nom'] = min(n.links.at[i, 'p_nom'], n.lines.at[line, 's_nom']) - n.links.at[i, 'carrier'] = 'DC' - n.links.at[i, 'underwater_fraction'] = 0. 
- n.links.at[i, 'length'] = n.lines.at[line, 'length'] + n.links.at[i, "p_nom"] = min( + n.links.at[i, "p_nom"], n.lines.at[line, "s_nom"] + ) + n.links.at[i, "carrier"] = "DC" + n.links.at[i, "underwater_fraction"] = 0.0 + n.links.at[i, "length"] = n.lines.at[line, "length"] n.remove("Line", line) n.remove("Bus", b0) - logger.info("Replacing B2B converter `{}` together with bus `{}` and line `{}` by an HVDC tie-line {}-{}" - .format(i, b0, line, linkcntry.at[i], buscntry.at[b1])) + logger.info( + "Replacing B2B converter `{}` together with bus `{}` and line `{}` by an HVDC tie-line {}-{}".format( + i, b0, line, linkcntry.at[i], buscntry.at[b1] + ) + ) def _set_links_underwater_fraction(n): - if n.links.empty: return + if n.links.empty: + return - if not hasattr(n.links, 'geometry'): - n.links['underwater_fraction'] = 0. + if not hasattr(n.links, "geometry"): + n.links["underwater_fraction"] = 0.0 else: offshore_shape = gpd.read_file(snakemake.input.offshore_shapes).unary_union links = gpd.GeoSeries(n.links.geometry.dropna().map(shapely.wkt.loads)) - n.links['underwater_fraction'] = links.intersection(offshore_shape).length / links.length + n.links["underwater_fraction"] = ( + links.intersection(offshore_shape).length / links.length + ) def _adjust_capacities_of_under_construction_branches(n): - lines_mode = snakemake.config['lines'].get('under_construction', 'undef') - if lines_mode == 'zero': - n.lines.loc[n.lines.under_construction, 'num_parallel'] = 0. - n.lines.loc[n.lines.under_construction, 's_nom'] = 0. - elif lines_mode == 'remove': + lines_mode = snakemake.config["lines"].get("under_construction", "undef") + if lines_mode == "zero": + n.lines.loc[n.lines.under_construction, "num_parallel"] = 0.0 + n.lines.loc[n.lines.under_construction, "s_nom"] = 0.0 + elif lines_mode == "remove": n.mremove("Line", n.lines.index[n.lines.under_construction]) - elif lines_mode != 'keep': - logger.warning("Unrecognized configuration for `lines: under_construction` = `{}`. Keeping under construction lines.") + elif lines_mode != "keep": + logger.warning( + "Unrecognized configuration for `lines: under_construction` = `{}`. Keeping under construction lines." + ) - links_mode = snakemake.config['links'].get('under_construction', 'undef') - if links_mode == 'zero': - n.links.loc[n.links.under_construction, "p_nom"] = 0. - elif links_mode == 'remove': + links_mode = snakemake.config["links"].get("under_construction", "undef") + if links_mode == "zero": + n.links.loc[n.links.under_construction, "p_nom"] = 0.0 + elif links_mode == "remove": n.mremove("Link", n.links.index[n.links.under_construction]) - elif links_mode != 'keep': - logger.warning("Unrecognized configuration for `links: under_construction` = `{}`. Keeping under construction links.") + elif links_mode != "keep": + logger.warning( + "Unrecognized configuration for `links: under_construction` = `{}`. Keeping under construction links." 
+ ) - if lines_mode == 'remove' or links_mode == 'remove': + if lines_mode == "remove" or links_mode == "remove": # We might need to remove further unconnected components n = _remove_unconnected_components(n) @@ -538,7 +680,7 @@ def base_network(): buses = _load_buses_from_eg() links = _load_links_from_eg(buses) - if snakemake.config['links'].get('include_tyndp'): + if snakemake.config["links"].get("include_tyndp"): buses, links = _add_links_from_tyndp(buses, links) converters = _load_converters_from_eg(buses) @@ -552,10 +694,10 @@ def base_network(): converters = _set_electrical_parameters_converters(converters) n = pypsa.Network() - n.name = 'PyPSA-Eur' + n.name = "PyPSA-Eur" - n.set_snapshots(pd.date_range(freq='h', **snakemake.config['snapshots'])) - n.snapshot_weightings[:] *= 8760. / n.snapshot_weightings.sum() + n.set_snapshots(pd.date_range(freq="h", **snakemake.config["snapshots"])) + n.snapshot_weightings[:] *= 8760.0 / n.snapshot_weightings.sum() n.import_components_from_dataframe(buses, "Bus") n.import_components_from_dataframe(lines, "Line") @@ -579,10 +721,12 @@ def base_network(): return n + if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('base_network') + + snakemake = mock_snakemake("base_network") configure_logging(snakemake) n = base_network() diff --git a/scripts/build_bus_regions.py b/scripts/build_bus_regions.py index d91d0575b..6b03a23a9 100644 --- a/scripts/build_bus_regions.py +++ b/scripts/build_bus_regions.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -57,22 +58,27 @@ def save_to_geojson(s, fn): if os.path.exists(fn): os.unlink(fn) - schema = {**gpd.io.file.infer_schema(s), 'geometry': 'Unknown'} - s.to_file(fn, driver='GeoJSON', schema=schema) + schema = {**gpd.io.file.infer_schema(s), "geometry": "Unknown"} + s.to_file(fn, driver="GeoJSON", schema=schema) if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('build_bus_regions') + + snakemake = mock_snakemake("build_bus_regions") configure_logging(snakemake) - countries = snakemake.config['countries'] + countries = snakemake.config["countries"] n = pypsa.Network(snakemake.input.base_network) - country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index('name')['geometry'] - offshore_shapes = gpd.read_file(snakemake.input.offshore_shapes).set_index('name')['geometry'] + country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index("name")[ + "geometry" + ] + offshore_shapes = gpd.read_file(snakemake.input.offshore_shapes).set_index("name")[ + "geometry" + ] onshore_regions = [] offshore_regions = [] @@ -82,27 +88,41 @@ def save_to_geojson(s, fn): onshore_shape = country_shapes[country] onshore_locs = n.buses.loc[c_b & n.buses.substation_lv, ["x", "y"]] - onshore_regions.append(gpd.GeoDataFrame({ - 'name': onshore_locs.index, - 'x': onshore_locs['x'], - 'y': onshore_locs['y'], - 'geometry': voronoi_partition_pts(onshore_locs.values, onshore_shape), - 'country': country - })) - - if country not in offshore_shapes.index: continue + onshore_regions.append( + gpd.GeoDataFrame( + { + "name": onshore_locs.index, + "x": onshore_locs["x"], + "y": onshore_locs["y"], + "geometry": voronoi_partition_pts( + onshore_locs.values, onshore_shape + ), + "country": country, + } + ) + ) + + if 
country not in offshore_shapes.index: + continue offshore_shape = offshore_shapes[country] offshore_locs = n.buses.loc[c_b & n.buses.substation_off, ["x", "y"]] - offshore_regions_c = gpd.GeoDataFrame({ - 'name': offshore_locs.index, - 'x': offshore_locs['x'], - 'y': offshore_locs['y'], - 'geometry': voronoi_partition_pts(offshore_locs.values, offshore_shape), - 'country': country - }) + offshore_regions_c = gpd.GeoDataFrame( + { + "name": offshore_locs.index, + "x": offshore_locs["x"], + "y": offshore_locs["y"], + "geometry": voronoi_partition_pts(offshore_locs.values, offshore_shape), + "country": country, + } + ) offshore_regions_c = offshore_regions_c.loc[offshore_regions_c.area > 1e-2] offshore_regions.append(offshore_regions_c) - save_to_geojson(pd.concat(onshore_regions, ignore_index=True), snakemake.output.regions_onshore) + save_to_geojson( + pd.concat(onshore_regions, ignore_index=True), snakemake.output.regions_onshore + ) - save_to_geojson(pd.concat(offshore_regions, ignore_index=True), snakemake.output.regions_offshore) + save_to_geojson( + pd.concat(offshore_regions, ignore_index=True), + snakemake.output.regions_offshore, + ) diff --git a/scripts/build_cutout.py b/scripts/build_cutout.py index 78eafac63..5194fca9f 100644 --- a/scripts/build_cutout.py +++ b/scripts/build_cutout.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2017-2021 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -101,30 +102,30 @@ logger = logging.getLogger(__name__) if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('build_cutout', cutout='europe-2013-era5') + + snakemake = mock_snakemake("build_cutout", cutout="europe-2013-era5") configure_logging(snakemake) - cutout_params = snakemake.config['atlite']['cutouts'][snakemake.wildcards.cutout] + cutout_params = snakemake.config["atlite"]["cutouts"][snakemake.wildcards.cutout] - snapshots = pd.date_range(freq='h', **snakemake.config['snapshots']) + snapshots = pd.date_range(freq="h", **snakemake.config["snapshots"]) time = [snapshots[0], snapshots[-1]] - cutout_params['time'] = slice(*cutout_params.get('time', time)) + cutout_params["time"] = slice(*cutout_params.get("time", time)) - if {'x', 'y', 'bounds'}.isdisjoint(cutout_params): + if {"x", "y", "bounds"}.isdisjoint(cutout_params): # Determine the bounds from bus regions with a buffer of two grid cells onshore = gpd.read_file(snakemake.input.regions_onshore) offshore = gpd.read_file(snakemake.input.regions_offshore) - regions = onshore.append(offshore) - d = max(cutout_params.get('dx', 0.25), cutout_params.get('dy', 0.25))*2 - cutout_params['bounds'] = regions.total_bounds + [-d, -d, d, d] - elif {'x', 'y'}.issubset(cutout_params): - cutout_params['x'] = slice(*cutout_params['x']) - cutout_params['y'] = slice(*cutout_params['y']) - + regions = onshore.append(offshore) + d = max(cutout_params.get("dx", 0.25), cutout_params.get("dy", 0.25)) * 2 + cutout_params["bounds"] = regions.total_bounds + [-d, -d, d, d] + elif {"x", "y"}.issubset(cutout_params): + cutout_params["x"] = slice(*cutout_params["x"]) + cutout_params["y"] = slice(*cutout_params["y"]) logging.info(f"Preparing cutout with parameters {cutout_params}.") - features = cutout_params.pop('features', None) + features = cutout_params.pop("features", None) cutout = atlite.Cutout(snakemake.output[0], **cutout_params) cutout.prepare(features=features) diff --git a/scripts/build_hydro_profile.py 
b/scripts/build_hydro_profile.py index 6ac592626..95f9a66a6 100644 --- a/scripts/build_hydro_profile.py +++ b/scripts/build_hydro_profile.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # @@ -69,27 +70,34 @@ logger = logging.getLogger(__name__) if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('build_hydro_profile') + + snakemake = mock_snakemake("build_hydro_profile") configure_logging(snakemake) - config = snakemake.config['renewable']['hydro'] + config = snakemake.config["renewable"]["hydro"] cutout = atlite.Cutout(snakemake.input.cutout) - countries = snakemake.config['countries'] - country_shapes = (gpd.read_file(snakemake.input.country_shapes) - .set_index('name')['geometry'].reindex(countries)) - country_shapes.index.name = 'countries' + countries = snakemake.config["countries"] + country_shapes = ( + gpd.read_file(snakemake.input.country_shapes) + .set_index("name")["geometry"] + .reindex(countries) + ) + country_shapes.index.name = "countries" eia_stats = vhydro.get_eia_annual_hydro_generation( - snakemake.input.eia_hydro_generation).reindex(columns=countries) - inflow = cutout.runoff(shapes=country_shapes, - smooth=True, - lower_threshold_quantile=True, - normalize_using_yearly=eia_stats) - - if 'clip_min_inflow' in config: - inflow = inflow.where(inflow > config['clip_min_inflow'], 0) + snakemake.input.eia_hydro_generation + ).reindex(columns=countries) + inflow = cutout.runoff( + shapes=country_shapes, + smooth=True, + lower_threshold_quantile=True, + normalize_using_yearly=eia_stats, + ) + + if "clip_min_inflow" in config: + inflow = inflow.where(inflow > config["clip_min_inflow"], 0) inflow.to_netcdf(snakemake.output[0]) diff --git a/scripts/build_load_data.py b/scripts/build_load_data.py index f71be6eab..1b5cd4bc9 100755 --- a/scripts/build_load_data.py +++ b/scripts/build_load_data.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2020 @JanFrederickUnnewehr, The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -36,6 +37,7 @@ """ import logging + logger = logging.getLogger(__name__) from _helpers import configure_logging @@ -70,23 +72,29 @@ def load_timeseries(fn, years, countries, powerstatistics=True): """ logger.info(f"Retrieving load data from '{fn}'.") - pattern = 'power_statistics' if powerstatistics else '_transparency' - pattern = f'_load_actual_entsoe_{pattern}' - rename = lambda s: s[:-len(pattern)] + pattern = "power_statistics" if powerstatistics else "_transparency" + pattern = f"_load_actual_entsoe_{pattern}" + rename = lambda s: s[: -len(pattern)] date_parser = lambda x: dateutil.parser.parse(x, ignoretz=True) - return (pd.read_csv(fn, index_col=0, parse_dates=[0], date_parser=date_parser) - .filter(like=pattern) - .rename(columns=rename) - .dropna(how="all", axis=0) - .rename(columns={'GB_UKM' : 'GB'}) - .filter(items=countries) - .loc[years]) + return ( + pd.read_csv(fn, index_col=0, parse_dates=[0], date_parser=date_parser) + .filter(like=pattern) + .rename(columns=rename) + .dropna(how="all", axis=0) + .rename(columns={"GB_UKM": "GB"}) + .filter(items=countries) + .loc[years] + ) def consecutive_nans(ds): - return (ds.isnull().astype(int) - .groupby(ds.notnull().astype(int).cumsum()[ds.isnull()]) - .transform('sum').fillna(0)) + return ( + ds.isnull() + .astype(int) + .groupby(ds.notnull().astype(int).cumsum()[ds.isnull()]) + .transform("sum") 
+ .fillna(0) + ) def fill_large_gaps(ds, shift): @@ -96,31 +104,43 @@ def fill_large_gaps(ds, shift): This function fills gaps ragning from 3 to 168 hours (one week). """ shift = Delta(shift) - nhours = shift / np.timedelta64(1, 'h') + nhours = shift / np.timedelta64(1, "h") if (consecutive_nans(ds) > nhours).any(): - logger.warning('There exist gaps larger then the time shift used for ' - 'copying time slices.') + logger.warning( + "There exist gaps larger then the time shift used for " + "copying time slices." + ) time_shift = pd.Series(ds.values, ds.index + shift) return ds.where(ds.notnull(), time_shift.reindex_like(ds)) def nan_statistics(df): def max_consecutive_nans(ds): - return (ds.isnull().astype(int) - .groupby(ds.notnull().astype(int).cumsum()) - .sum().max()) + return ( + ds.isnull() + .astype(int) + .groupby(ds.notnull().astype(int).cumsum()) + .sum() + .max() + ) + consecutive = df.apply(max_consecutive_nans) total = df.isnull().sum() - max_total_per_month = df.isnull().resample('m').sum().max() - return pd.concat([total, consecutive, max_total_per_month], - keys=['total', 'consecutive', 'max_total_per_month'], axis=1) + max_total_per_month = df.isnull().resample("m").sum().max() + return pd.concat( + [total, consecutive, max_total_per_month], + keys=["total", "consecutive", "max_total_per_month"], + axis=1, + ) def copy_timeslice(load, cntry, start, stop, delta): start = pd.Timestamp(start) stop = pd.Timestamp(stop) - if start-delta in load.index and stop in load.index and cntry in load: - load.loc[start:stop, cntry] = load.loc[start-delta:stop-delta, cntry].values + if start - delta in load.index and stop in load.index and cntry in load: + load.loc[start:stop, cntry] = load.loc[ + start - delta : stop - delta, cntry + ].values def manual_adjustment(load, powerstatistics): @@ -159,67 +179,87 @@ def manual_adjustment(load, powerstatistics): """ if powerstatistics: - if 'MK' in load.columns: - if 'AL' not in load.columns or load.AL.isnull().values.all(): - load['AL'] = load['MK'] * (4.1 / 7.4) - if 'RS' in load.columns: - if 'KV' not in load.columns or load.KV.isnull().values.all(): - load['KV'] = load['RS'] * (4.8 / 27.) 
- - copy_timeslice(load, 'GR', '2015-08-11 21:00', '2015-08-15 20:00', Delta(weeks=1)) - copy_timeslice(load, 'AT', '2018-12-31 22:00', '2019-01-01 22:00', Delta(days=2)) - copy_timeslice(load, 'CH', '2010-01-19 07:00', '2010-01-19 22:00', Delta(days=1)) - copy_timeslice(load, 'CH', '2010-03-28 00:00', '2010-03-28 21:00', Delta(days=1)) + if "MK" in load.columns: + if "AL" not in load.columns or load.AL.isnull().values.all(): + load["AL"] = load["MK"] * (4.1 / 7.4) + if "RS" in load.columns: + if "KV" not in load.columns or load.KV.isnull().values.all(): + load["KV"] = load["RS"] * (4.8 / 27.0) + + copy_timeslice( + load, "GR", "2015-08-11 21:00", "2015-08-15 20:00", Delta(weeks=1) + ) + copy_timeslice( + load, "AT", "2018-12-31 22:00", "2019-01-01 22:00", Delta(days=2) + ) + copy_timeslice( + load, "CH", "2010-01-19 07:00", "2010-01-19 22:00", Delta(days=1) + ) + copy_timeslice( + load, "CH", "2010-03-28 00:00", "2010-03-28 21:00", Delta(days=1) + ) # is a WE, so take WE before - copy_timeslice(load, 'CH', '2010-10-08 13:00', '2010-10-10 21:00', Delta(weeks=1)) - copy_timeslice(load, 'CH', '2010-11-04 04:00', '2010-11-04 22:00', Delta(days=1)) - copy_timeslice(load, 'NO', '2010-12-09 11:00', '2010-12-09 18:00', Delta(days=1)) + copy_timeslice( + load, "CH", "2010-10-08 13:00", "2010-10-10 21:00", Delta(weeks=1) + ) + copy_timeslice( + load, "CH", "2010-11-04 04:00", "2010-11-04 22:00", Delta(days=1) + ) + copy_timeslice( + load, "NO", "2010-12-09 11:00", "2010-12-09 18:00", Delta(days=1) + ) # whole january missing - copy_timeslice(load, 'GB', '2009-12-31 23:00', '2010-01-31 23:00', Delta(days=-364)) + copy_timeslice( + load, "GB", "2009-12-31 23:00", "2010-01-31 23:00", Delta(days=-364) + ) else: - if 'ME' in load: - if 'AL' not in load and 'AL' in countries: - load['AL'] = load.ME * (5.7/2.9) - if 'MK' not in load and 'MK' in countries: - load['MK'] = load.ME * (6.7/2.9) - copy_timeslice(load, 'BG', '2018-10-27 21:00', '2018-10-28 22:00', Delta(weeks=1)) + if "ME" in load: + if "AL" not in load and "AL" in countries: + load["AL"] = load.ME * (5.7 / 2.9) + if "MK" not in load and "MK" in countries: + load["MK"] = load.ME * (6.7 / 2.9) + copy_timeslice( + load, "BG", "2018-10-27 21:00", "2018-10-28 22:00", Delta(weeks=1) + ) return load if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('build_load_data') + + snakemake = mock_snakemake("build_load_data") configure_logging(snakemake) config = snakemake.config - powerstatistics = config['load']['power_statistics'] - interpolate_limit = config['load']['interpolate_limit'] - countries = config['countries'] - snapshots = pd.date_range(freq='h', **config['snapshots']) + powerstatistics = config["load"]["power_statistics"] + interpolate_limit = config["load"]["interpolate_limit"] + countries = config["countries"] + snapshots = pd.date_range(freq="h", **config["snapshots"]) years = slice(snapshots[0], snapshots[-1]) - time_shift = config['load']['time_shift_for_large_gaps'] + time_shift = config["load"]["time_shift_for_large_gaps"] load = load_timeseries(snakemake.input[0], years, countries, powerstatistics) - if config['load']['manual_adjustments']: + if config["load"]["manual_adjustments"]: load = manual_adjustment(load, powerstatistics) logger.info(f"Linearly interpolate gaps of size {interpolate_limit} and less.") - load = load.interpolate(method='linear', limit=interpolate_limit) + load = load.interpolate(method="linear", 
limit=interpolate_limit) - logger.info("Filling larger gaps by copying time-slices of period " - f"'{time_shift}'.") + logger.info( + "Filling larger gaps by copying time-slices of period " f"'{time_shift}'." + ) load = load.apply(fill_large_gaps, shift=time_shift) assert not load.isna().any().any(), ( - 'Load data contains nans. Adjust the parameters ' - '`time_shift_for_large_gaps` or modify the `manual_adjustment` function ' - 'for implementing the needed load data modifications.') + "Load data contains nans. Adjust the parameters " + "`time_shift_for_large_gaps` or modify the `manual_adjustment` function " + "for implementing the needed load data modifications." + ) load.to_csv(snakemake.output[0]) - diff --git a/scripts/build_natura_raster.py b/scripts/build_natura_raster.py index f7a923d68..d7120b8d1 100644 --- a/scripts/build_natura_raster.py +++ b/scripts/build_natura_raster.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -56,11 +57,11 @@ def determine_cutout_xXyY(cutout_name): assert cutout.crs.to_epsg() == 4326 x, X, y, Y = cutout.extent dx, dy = cutout.dx, cutout.dy - return [x - dx/2., X + dx/2., y - dy/2., Y + dy/2.] + return [x - dx / 2.0, X + dx / 2.0, y - dy / 2.0, Y + dy / 2.0] def get_transform_and_shape(bounds, res): - left, bottom = [(b // res)* res for b in bounds[:2]] + left, bottom = [(b // res) * res for b in bounds[:2]] right, top = [(b // res + 1) * res for b in bounds[2:]] shape = int((top - bottom) // res), int((right - left) / res) transform = rio.Affine(res, 0, left, 0, -res, top) @@ -68,11 +69,11 @@ def get_transform_and_shape(bounds, res): if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('build_natura_raster') - configure_logging(snakemake) + snakemake = mock_snakemake("build_natura_raster") + configure_logging(snakemake) cutouts = snakemake.input.cutouts xs, Xs, ys, Ys = zip(*(determine_cutout_xXyY(cutout) for cutout in cutouts)) @@ -84,8 +85,16 @@ def get_transform_and_shape(bounds, res): raster = ~geometry_mask(shapes.geometry, out_shape[::-1], transform) raster = raster.astype(rio.uint8) - with rio.open(snakemake.output[0], 'w', driver='GTiff', dtype=rio.uint8, - count=1, transform=transform, crs=3035, compress='lzw', - width=raster.shape[1], height=raster.shape[0]) as dst: + with rio.open( + snakemake.output[0], + "w", + driver="GTiff", + dtype=rio.uint8, + count=1, + transform=transform, + crs=3035, + compress="lzw", + width=raster.shape[1], + height=raster.shape[0], + ) as dst: dst.write(raster, indexes=1) - diff --git a/scripts/build_powerplants.py b/scripts/build_powerplants.py index ab0006319..cd64d073b 100755 --- a/scripts/build_powerplants.py +++ b/scripts/build_powerplants.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -85,51 +86,58 @@ def add_custom_powerplants(ppl): - custom_ppl_query = snakemake.config['electricity']['custom_powerplants'] + custom_ppl_query = snakemake.config["electricity"]["custom_powerplants"] if not custom_ppl_query: return ppl - add_ppls = pd.read_csv(snakemake.input.custom_powerplants, index_col=0, - dtype={'bus': 'str'}) + add_ppls = pd.read_csv( + snakemake.input.custom_powerplants, index_col=0, dtype={"bus": "str"} + ) if isinstance(custom_ppl_query, str): add_ppls.query(custom_ppl_query, inplace=True) return 
ppl.append(add_ppls, sort=False, ignore_index=True, verify_integrity=True) if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('build_powerplants') + + snakemake = mock_snakemake("build_powerplants") configure_logging(snakemake) n = pypsa.Network(snakemake.input.base_network) countries = n.buses.country.unique() - ppl = (pm.powerplants(from_url=True) - .powerplant.fill_missing_decommyears() - .powerplant.convert_country_to_alpha2() - .query('Fueltype not in ["Solar", "Wind"] and Country in @countries') - .replace({'Technology': {'Steam Turbine': 'OCGT'}}) - .assign(Fueltype=lambda df: ( - df.Fueltype - .where(df.Fueltype != 'Natural Gas', - df.Technology.replace('Steam Turbine', - 'OCGT').fillna('OCGT'))))) - - ppl_query = snakemake.config['electricity']['powerplants_filter'] + ppl = ( + pm.powerplants(from_url=True) + .powerplant.fill_missing_decommyears() + .powerplant.convert_country_to_alpha2() + .query('Fueltype not in ["Solar", "Wind"] and Country in @countries') + .replace({"Technology": {"Steam Turbine": "OCGT"}}) + .assign( + Fueltype=lambda df: ( + df.Fueltype.where( + df.Fueltype != "Natural Gas", + df.Technology.replace("Steam Turbine", "OCGT").fillna("OCGT"), + ) + ) + ) + ) + + ppl_query = snakemake.config["electricity"]["powerplants_filter"] if isinstance(ppl_query, str): ppl.query(ppl_query, inplace=True) - ppl = add_custom_powerplants(ppl) # add carriers from own powerplant files + ppl = add_custom_powerplants(ppl) # add carriers from own powerplant files cntries_without_ppl = [c for c in countries if c not in ppl.Country.unique()] for c in countries: - substation_i = n.buses.query('substation_lv and country == @c').index - kdtree = KDTree(n.buses.loc[substation_i, ['x','y']].values) - ppl_i = ppl.query('Country == @c').index + substation_i = n.buses.query("substation_lv and country == @c").index + kdtree = KDTree(n.buses.loc[substation_i, ["x", "y"]].values) + ppl_i = ppl.query("Country == @c").index - tree_i = kdtree.query(ppl.loc[ppl_i, ['lon','lat']].values)[1] - ppl.loc[ppl_i, 'bus'] = substation_i.append(pd.Index([np.nan]))[tree_i] + tree_i = kdtree.query(ppl.loc[ppl_i, ["lon", "lat"]].values)[1] + ppl.loc[ppl_i, "bus"] = substation_i.append(pd.Index([np.nan]))[tree_i] if cntries_without_ppl: logging.warning(f"No powerplants known in: {', '.join(cntries_without_ppl)}") diff --git a/scripts/build_renewable_profiles.py b/scripts/build_renewable_profiles.py index 9ce83de37..e32311f89 100644 --- a/scripts/build_renewable_profiles.py +++ b/scripts/build_renewable_profiles.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # @@ -195,42 +196,42 @@ logger = logging.getLogger(__name__) -if __name__ == '__main__': - if 'snakemake' not in globals(): +if __name__ == "__main__": + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('build_renewable_profiles', technology='solar') + + snakemake = mock_snakemake("build_renewable_profiles", technology="solar") configure_logging(snakemake) pgb.streams.wrap_stderr() paths = snakemake.input - nprocesses = snakemake.config['atlite'].get('nprocesses') - noprogress = not snakemake.config['atlite'].get('show_progress', True) - config = snakemake.config['renewable'][snakemake.wildcards.technology] - resource = config['resource'] # pv panel config / wind turbine config - correction_factor = 
config.get('correction_factor', 1.) - capacity_per_sqkm = config['capacity_per_sqkm'] - p_nom_max_meth = config.get('potential', 'conservative') + nprocesses = snakemake.config["atlite"].get("nprocesses") + noprogress = not snakemake.config["atlite"].get("show_progress", True) + config = snakemake.config["renewable"][snakemake.wildcards.technology] + resource = config["resource"] # pv panel config / wind turbine config + correction_factor = config.get("correction_factor", 1.0) + capacity_per_sqkm = config["capacity_per_sqkm"] + p_nom_max_meth = config.get("potential", "conservative") if isinstance(config.get("corine", {}), list): - config['corine'] = {'grid_codes': config['corine']} - - if correction_factor != 1.: - logger.info(f'correction_factor is set as {correction_factor}') + config["corine"] = {"grid_codes": config["corine"]} + if correction_factor != 1.0: + logger.info(f"correction_factor is set as {correction_factor}") - cutout = atlite.Cutout(paths['cutout']) - regions = gpd.read_file(paths.regions).set_index('name').rename_axis('bus') + cutout = atlite.Cutout(paths["cutout"]) + regions = gpd.read_file(paths.regions).set_index("name").rename_axis("bus") buses = regions.index excluder = atlite.ExclusionContainer(crs=3035, res=100) - if config['natura']: + if config["natura"]: excluder.add_raster(paths.natura, nodata=0, allow_no_overlap=True) corine = config.get("corine", {}) if "grid_codes" in corine: codes = corine["grid_codes"] excluder.add_raster(paths.corine, codes=codes, invert=True, crs=3035) - if corine.get("distance", 0.) > 0.: + if corine.get("distance", 0.0) > 0.0: codes = corine["distance_grid_codes"] buffer = corine["distance"] excluder.add_raster(paths.corine, codes=codes, buffer=buffer, crs=3035) @@ -239,57 +240,63 @@ # lambda not supported for atlite + multiprocessing # use named function np.greater with partially frozen argument instead # and exclude areas where: -max_depth > grid cell depth - func = functools.partial(np.greater,-config['max_depth']) + func = functools.partial(np.greater, -config["max_depth"]) excluder.add_raster(paths.gebco, codes=func, crs=4236, nodata=-1000) - if 'min_shore_distance' in config: - buffer = config['min_shore_distance'] + if "min_shore_distance" in config: + buffer = config["min_shore_distance"] excluder.add_geometry(paths.country_shapes, buffer=buffer) - if 'max_shore_distance' in config: - buffer = config['max_shore_distance'] + if "max_shore_distance" in config: + buffer = config["max_shore_distance"] excluder.add_geometry(paths.country_shapes, buffer=buffer, invert=True) kwargs = dict(nprocesses=nprocesses, disable_progressbar=noprogress) if noprogress: - logger.info('Calculate landuse availabilities...') + logger.info("Calculate landuse availabilities...") start = time.time() availability = cutout.availabilitymatrix(regions, excluder, **kwargs) duration = time.time() - start - logger.info(f'Completed availability calculation ({duration:2.2f}s)') + logger.info(f"Completed availability calculation ({duration:2.2f}s)") else: availability = cutout.availabilitymatrix(regions, excluder, **kwargs) area = cutout.grid.to_crs(3035).area / 1e6 - area = xr.DataArray(area.values.reshape(cutout.shape), - [cutout.coords['y'], cutout.coords['x']]) + area = xr.DataArray( + area.values.reshape(cutout.shape), [cutout.coords["y"], cutout.coords["x"]] + ) - potential = capacity_per_sqkm * availability.sum('bus') * area - func = getattr(cutout, resource.pop('method')) - resource['dask_kwargs'] = {'num_workers': nprocesses} + potential = 
capacity_per_sqkm * availability.sum("bus") * area + func = getattr(cutout, resource.pop("method")) + resource["dask_kwargs"] = {"num_workers": nprocesses} capacity_factor = correction_factor * func(capacity_factor=True, **resource) layout = capacity_factor * area * capacity_per_sqkm - profile, capacities = func(matrix=availability.stack(spatial=['y','x']), - layout=layout, index=buses, - per_unit=True, return_capacity=True, **resource) + profile, capacities = func( + matrix=availability.stack(spatial=["y", "x"]), + layout=layout, + index=buses, + per_unit=True, + return_capacity=True, + **resource, + ) logger.info(f"Calculating maximal capacity per bus (method '{p_nom_max_meth}')") - if p_nom_max_meth == 'simple': + if p_nom_max_meth == "simple": p_nom_max = capacity_per_sqkm * availability @ area - elif p_nom_max_meth == 'conservative': - max_cap_factor = capacity_factor.where(availability!=0).max(['x', 'y']) + elif p_nom_max_meth == "conservative": + max_cap_factor = capacity_factor.where(availability != 0).max(["x", "y"]) p_nom_max = capacities / max_cap_factor else: - raise AssertionError('Config key `potential` should be one of "simple" ' - f'(default) or "conservative", not "{p_nom_max_meth}"') - - + raise AssertionError( + 'Config key `potential` should be one of "simple" ' + f'(default) or "conservative", not "{p_nom_max_meth}"' + ) - logger.info('Calculate average distances.') - layoutmatrix = (layout * availability).stack(spatial=['y','x']) + logger.info("Calculate average distances.") + layoutmatrix = (layout * availability).stack(spatial=["y", "x"]) - coords = cutout.grid[['x', 'y']] - bus_coords = regions[['x', 'y']] + coords = cutout.grid[["x", "y"]] + bus_coords = regions[["x", "y"]] average_distance = [] centre_of_mass = [] @@ -298,39 +305,45 @@ nz_b = row != 0 row = row[nz_b] co = coords[nz_b] - distances = haversine(bus_coords.loc[bus], co) + distances = haversine(bus_coords.loc[bus], co) average_distance.append((distances * (row / row.sum())).sum()) centre_of_mass.append(co.values.T @ (row / row.sum())) average_distance = xr.DataArray(average_distance, [buses]) - centre_of_mass = xr.DataArray(centre_of_mass, [buses, ('spatial', ['x', 'y'])]) - - - ds = xr.merge([(correction_factor * profile).rename('profile'), - capacities.rename('weight'), - p_nom_max.rename('p_nom_max'), - potential.rename('potential'), - average_distance.rename('average_distance')]) - + centre_of_mass = xr.DataArray(centre_of_mass, [buses, ("spatial", ["x", "y"])]) + + ds = xr.merge( + [ + (correction_factor * profile).rename("profile"), + capacities.rename("weight"), + p_nom_max.rename("p_nom_max"), + potential.rename("potential"), + average_distance.rename("average_distance"), + ] + ) if snakemake.wildcards.technology.startswith("offwind"): - logger.info('Calculate underwater fraction of connections.') - offshore_shape = gpd.read_file(paths['offshore_shapes']).unary_union + logger.info("Calculate underwater fraction of connections.") + offshore_shape = gpd.read_file(paths["offshore_shapes"]).unary_union underwater_fraction = [] for bus in buses: p = centre_of_mass.sel(bus=bus).data - line = LineString([p, regions.loc[bus, ['x', 'y']]]) - frac = line.intersection(offshore_shape).length/line.length + line = LineString([p, regions.loc[bus, ["x", "y"]]]) + frac = line.intersection(offshore_shape).length / line.length underwater_fraction.append(frac) - ds['underwater_fraction'] = xr.DataArray(underwater_fraction, [buses]) + ds["underwater_fraction"] = xr.DataArray(underwater_fraction, [buses]) # select 
only buses with some capacity and minimal capacity factor - ds = ds.sel(bus=((ds['profile'].mean('time') > config.get('min_p_max_pu', 0.)) & - (ds['p_nom_max'] > config.get('min_p_nom_max', 0.)))) - - if 'clip_p_max_pu' in config: - min_p_max_pu = config['clip_p_max_pu'] - ds['profile'] = ds['profile'].where(ds['profile'] >= min_p_max_pu, 0) + ds = ds.sel( + bus=( + (ds["profile"].mean("time") > config.get("min_p_max_pu", 0.0)) + & (ds["p_nom_max"] > config.get("min_p_nom_max", 0.0)) + ) + ) + + if "clip_p_max_pu" in config: + min_p_max_pu = config["clip_p_max_pu"] + ds["profile"] = ds["profile"].where(ds["profile"] >= min_p_max_pu, 0) ds.to_netcdf(snakemake.output.profile) diff --git a/scripts/build_shapes.py b/scripts/build_shapes.py index 5814085b8..fd9a916bd 100644 --- a/scripts/build_shapes.py +++ b/scripts/build_shapes.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -95,41 +96,59 @@ def _get_country(target, **keys): def _simplify_polys(polys, minarea=0.1, tolerance=0.01, filterremote=True): if isinstance(polys, MultiPolygon): - polys = sorted(polys, key=attrgetter('area'), reverse=True) + polys = sorted(polys, key=attrgetter("area"), reverse=True) mainpoly = polys[0] - mainlength = np.sqrt(mainpoly.area/(2.*np.pi)) + mainlength = np.sqrt(mainpoly.area / (2.0 * np.pi)) if mainpoly.area > minarea: - polys = MultiPolygon([p - for p in takewhile(lambda p: p.area > minarea, polys) - if not filterremote or (mainpoly.distance(p) < mainlength)]) + polys = MultiPolygon( + [ + p + for p in takewhile(lambda p: p.area > minarea, polys) + if not filterremote or (mainpoly.distance(p) < mainlength) + ] + ) else: polys = mainpoly return polys.simplify(tolerance=tolerance) def countries(): - cntries = snakemake.config['countries'] - if 'RS' in cntries: cntries.append('KV') + cntries = snakemake.config["countries"] + if "RS" in cntries: + cntries.append("KV") df = gpd.read_file(snakemake.input.naturalearth) # Names are a hassle in naturalearth, try several fields - fieldnames = (df[x].where(lambda s: s!='-99') for x in ('ISO_A2', 'WB_A2', 'ADM0_A3')) - df['name'] = reduce(lambda x,y: x.fillna(y), fieldnames, next(fieldnames)).str[0:2] - - df = df.loc[df.name.isin(cntries) & ((df['scalerank'] == 0) | (df['scalerank'] == 5))] - s = df.set_index('name')['geometry'].map(_simplify_polys) - if 'RS' in cntries: s['RS'] = s['RS'].union(s.pop('KV')) + fieldnames = ( + df[x].where(lambda s: s != "-99") for x in ("ISO_A2", "WB_A2", "ADM0_A3") + ) + df["name"] = reduce(lambda x, y: x.fillna(y), fieldnames, next(fieldnames)).str[0:2] + + df = df.loc[ + df.name.isin(cntries) & ((df["scalerank"] == 0) | (df["scalerank"] == 5)) + ] + s = df.set_index("name")["geometry"].map(_simplify_polys) + if "RS" in cntries: + s["RS"] = s["RS"].union(s.pop("KV")) return s def eez(country_shapes): df = gpd.read_file(snakemake.input.eez) - df = df.loc[df['ISO_3digit'].isin([_get_country('alpha_3', alpha_2=c) for c in snakemake.config['countries']])] - df['name'] = df['ISO_3digit'].map(lambda c: _get_country('alpha_2', alpha_3=c)) - s = df.set_index('name').geometry.map(lambda s: _simplify_polys(s, filterremote=False)) - s = gpd.GeoSeries({k:v for k,v in s.iteritems() if v.distance(country_shapes[k]) < 1e-3}) + df = df.loc[ + df["ISO_3digit"].isin( + [_get_country("alpha_3", alpha_2=c) for c in snakemake.config["countries"]] + ) + ] + df["name"] = df["ISO_3digit"].map(lambda c: _get_country("alpha_2", alpha_3=c)) + s = 
df.set_index("name").geometry.map( + lambda s: _simplify_polys(s, filterremote=False) + ) + s = gpd.GeoSeries( + {k: v for k, v in s.iteritems() if v.distance(country_shapes[k]) < 1e-3} + ) s.index.name = "name" return s @@ -141,63 +160,93 @@ def country_cover(country_shapes, eez_shapes=None): europe_shape = cascaded_union(shapes) if isinstance(europe_shape, MultiPolygon): - europe_shape = max(europe_shape, key=attrgetter('area')) + europe_shape = max(europe_shape, key=attrgetter("area")) return Polygon(shell=europe_shape.exterior) def nuts3(country_shapes): df = gpd.read_file(snakemake.input.nuts3) - df = df.loc[df['STAT_LEVL_'] == 3] - df['geometry'] = df['geometry'].map(_simplify_polys) - df = df.rename(columns={'NUTS_ID': 'id'})[['id', 'geometry']].set_index('id') - - pop = pd.read_table(snakemake.input.nuts3pop, na_values=[':'], delimiter=' ?\t', engine='python') - pop = (pop - .set_index(pd.MultiIndex.from_tuples(pop.pop('unit,geo\\time').str.split(','))).loc['THS'] - .applymap(lambda x: pd.to_numeric(x, errors='coerce')) - .fillna(method='bfill', axis=1))['2014'] - - gdp = pd.read_table(snakemake.input.nuts3gdp, na_values=[':'], delimiter=' ?\t', engine='python') - gdp = (gdp - .set_index(pd.MultiIndex.from_tuples(gdp.pop('unit,geo\\time').str.split(','))).loc['EUR_HAB'] - .applymap(lambda x: pd.to_numeric(x, errors='coerce')) - .fillna(method='bfill', axis=1))['2014'] + df = df.loc[df["STAT_LEVL_"] == 3] + df["geometry"] = df["geometry"].map(_simplify_polys) + df = df.rename(columns={"NUTS_ID": "id"})[["id", "geometry"]].set_index("id") + + pop = pd.read_table( + snakemake.input.nuts3pop, na_values=[":"], delimiter=" ?\t", engine="python" + ) + pop = ( + pop.set_index( + pd.MultiIndex.from_tuples(pop.pop("unit,geo\\time").str.split(",")) + ) + .loc["THS"] + .applymap(lambda x: pd.to_numeric(x, errors="coerce")) + .fillna(method="bfill", axis=1) + )["2014"] + + gdp = pd.read_table( + snakemake.input.nuts3gdp, na_values=[":"], delimiter=" ?\t", engine="python" + ) + gdp = ( + gdp.set_index( + pd.MultiIndex.from_tuples(gdp.pop("unit,geo\\time").str.split(",")) + ) + .loc["EUR_HAB"] + .applymap(lambda x: pd.to_numeric(x, errors="coerce")) + .fillna(method="bfill", axis=1) + )["2014"] cantons = pd.read_csv(snakemake.input.ch_cantons) - cantons = cantons.set_index(cantons['HASC'].str[3:])['NUTS'] - cantons = cantons.str.pad(5, side='right', fillchar='0') + cantons = cantons.set_index(cantons["HASC"].str[3:])["NUTS"] + cantons = cantons.str.pad(5, side="right", fillchar="0") swiss = pd.read_excel(snakemake.input.ch_popgdp, skiprows=3, index_col=0) swiss.columns = swiss.columns.to_series().map(cantons) - pop = pop.append(pd.to_numeric(swiss.loc['Residents in 1000', 'CH040':])) - gdp = gdp.append(pd.to_numeric(swiss.loc['Gross domestic product per capita in Swiss francs', 'CH040':])) + pop = pop.append(pd.to_numeric(swiss.loc["Residents in 1000", "CH040":])) + gdp = gdp.append( + pd.to_numeric( + swiss.loc["Gross domestic product per capita in Swiss francs", "CH040":] + ) + ) df = df.join(pd.DataFrame(dict(pop=pop, gdp=gdp))) - df['country'] = df.index.to_series().str[:2].replace(dict(UK='GB', EL='GR')) - - excludenuts = pd.Index(('FRA10', 'FRA20', 'FRA30', 'FRA40', 'FRA50', - 'PT200', 'PT300', - 'ES707', 'ES703', 'ES704','ES705', 'ES706', 'ES708', 'ES709', - 'FI2', 'FR9')) - excludecountry = pd.Index(('MT', 'TR', 'LI', 'IS', 'CY', 'KV')) + df["country"] = df.index.to_series().str[:2].replace(dict(UK="GB", EL="GR")) + + excludenuts = pd.Index( + ( + "FRA10", + "FRA20", + "FRA30", + "FRA40", 
+ "FRA50", + "PT200", + "PT300", + "ES707", + "ES703", + "ES704", + "ES705", + "ES706", + "ES708", + "ES709", + "FI2", + "FR9", + ) + ) + excludecountry = pd.Index(("MT", "TR", "LI", "IS", "CY", "KV")) df = df.loc[df.index.difference(excludenuts)] df = df.loc[~df.country.isin(excludecountry)] manual = gpd.GeoDataFrame( - [['BA1', 'BA', 3871.], - ['RS1', 'RS', 7210.], - ['AL1', 'AL', 2893.]], - columns=['NUTS_ID', 'country', 'pop'] - ).set_index('NUTS_ID') - manual['geometry'] = manual['country'].map(country_shapes) + [["BA1", "BA", 3871.0], ["RS1", "RS", 7210.0], ["AL1", "AL", 2893.0]], + columns=["NUTS_ID", "country", "pop"], + ).set_index("NUTS_ID") + manual["geometry"] = manual["country"].map(country_shapes) manual = manual.dropna() df = df.append(manual, sort=False) - df.loc['ME000', 'pop'] = 650. + df.loc["ME000", "pop"] = 650.0 return df @@ -208,14 +257,15 @@ def save_to_geojson(df, fn): if not isinstance(df, gpd.GeoDataFrame): df = gpd.GeoDataFrame(dict(geometry=df)) df = df.reset_index() - schema = {**gpd.io.file.infer_schema(df), 'geometry': 'Unknown'} - df.to_file(fn, driver='GeoJSON', schema=schema) + schema = {**gpd.io.file.infer_schema(df), "geometry": "Unknown"} + df.to_file(fn, driver="GeoJSON", schema=schema) if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('build_shapes') + + snakemake = mock_snakemake("build_shapes") configure_logging(snakemake) out = snakemake.output diff --git a/scripts/cluster_network.py b/scripts/cluster_network.py index 980b73b05..ed8db1ecb 100644 --- a/scripts/cluster_network.py +++ b/scripts/cluster_network.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -137,8 +138,12 @@ from functools import reduce -from pypsa.networkclustering import (busmap_by_kmeans, busmap_by_spectral_clustering, - _make_consense, get_clustering_from_busmap) +from pypsa.networkclustering import ( + busmap_by_kmeans, + busmap_by_spectral_clustering, + _make_consense, + get_clustering_from_busmap, +) from add_electricity import load_costs @@ -147,19 +152,21 @@ logger = logging.getLogger(__name__) -def normed(x): return (x/x.sum()).fillna(0.) +def normed(x): + return (x / x.sum()).fillna(0.0) def weighting_for_country(n, x): - conv_carriers = {'OCGT','CCGT','PHS', 'hydro'} - gen = (n - .generators.loc[n.generators.carrier.isin(conv_carriers)] - .groupby('bus').p_nom.sum() - .reindex(n.buses.index, fill_value=0.) + - n - .storage_units.loc[n.storage_units.carrier.isin(conv_carriers)] - .groupby('bus').p_nom.sum() - .reindex(n.buses.index, fill_value=0.)) + conv_carriers = {"OCGT", "CCGT", "PHS", "hydro"} + gen = n.generators.loc[n.generators.carrier.isin(conv_carriers)].groupby( + "bus" + ).p_nom.sum().reindex(n.buses.index, fill_value=0.0) + n.storage_units.loc[ + n.storage_units.carrier.isin(conv_carriers) + ].groupby( + "bus" + ).p_nom.sum().reindex( + n.buses.index, fill_value=0.0 + ) load = n.loads_t.p_set.mean().groupby(n.loads.bus).sum() b_i = x.index @@ -167,132 +174,188 @@ def weighting_for_country(n, x): l = normed(load.reindex(b_i, fill_value=0)) w = g + l - return (w * (100. 
/ w.max())).clip(lower=1.).astype(int) + return (w * (100.0 / w.max())).clip(lower=1.0).astype(int) def distribute_clusters(n, n_clusters, focus_weights=None, solver_name=None): """Determine the number of clusters per country""" if solver_name is None: - solver_name = snakemake.config['solving']['solver']['name'] + solver_name = snakemake.config["solving"]["solver"]["name"] - L = (n.loads_t.p_set.mean() - .groupby(n.loads.bus).sum() - .groupby([n.buses.country, n.buses.sub_network]).sum() - .pipe(normed)) + L = ( + n.loads_t.p_set.mean() + .groupby(n.loads.bus) + .sum() + .groupby([n.buses.country, n.buses.sub_network]) + .sum() + .pipe(normed) + ) - N = n.buses.groupby(['country', 'sub_network']).size() + N = n.buses.groupby(["country", "sub_network"]).size() - assert n_clusters >= len(N) and n_clusters <= N.sum(), \ - f"Number of clusters must be {len(N)} <= n_clusters <= {N.sum()} for this selection of countries." + assert ( + n_clusters >= len(N) and n_clusters <= N.sum() + ), f"Number of clusters must be {len(N)} <= n_clusters <= {N.sum()} for this selection of countries." if focus_weights is not None: total_focus = sum(list(focus_weights.values())) - assert total_focus <= 1.0, "The sum of focus weights must be less than or equal to 1." + assert ( + total_focus <= 1.0 + ), "The sum of focus weights must be less than or equal to 1." for country, weight in focus_weights.items(): L[country] = weight / len(L[country]) - remainder = [c not in focus_weights.keys() for c in L.index.get_level_values('country')] + remainder = [ + c not in focus_weights.keys() for c in L.index.get_level_values("country") + ] L[remainder] = L.loc[remainder].pipe(normed) * (1 - total_focus) - logger.warning('Using custom focus weights for determining number of clusters.') + logger.warning("Using custom focus weights for determining number of clusters.") - assert np.isclose(L.sum(), 1.0, rtol=1e-3), f"Country weights L must sum up to 1.0 when distributing clusters. Is {L.sum()}." + assert np.isclose( + L.sum(), 1.0, rtol=1e-3 + ), f"Country weights L must sum up to 1.0 when distributing clusters. Is {L.sum()}." m = po.ConcreteModel() + def n_bounds(model, *n_id): return (1, N[n_id]) + m.n = po.Var(list(L.index), bounds=n_bounds, domain=po.Integers) m.tot = po.Constraint(expr=(po.summation(m.n) == n_clusters)) - m.objective = po.Objective(expr=sum((m.n[i] - L.loc[i]*n_clusters)**2 for i in L.index), - sense=po.minimize) + m.objective = po.Objective( + expr=sum((m.n[i] - L.loc[i] * n_clusters) ** 2 for i in L.index), + sense=po.minimize, + ) opt = po.SolverFactory(solver_name) - if not opt.has_capability('quadratic_objective'): - logger.warning(f'The configured solver `{solver_name}` does not support quadratic objectives. Falling back to `ipopt`.') - opt = po.SolverFactory('ipopt') + if not opt.has_capability("quadratic_objective"): + logger.warning( + f"The configured solver `{solver_name}` does not support quadratic objectives. Falling back to `ipopt`." 
+ ) + opt = po.SolverFactory("ipopt") results = opt.solve(m) - assert results['Solver'][0]['Status'] == 'ok', f"Solver returned non-optimally: {results}" + assert ( + results["Solver"][0]["Status"] == "ok" + ), f"Solver returned non-optimally: {results}" return pd.Series(m.n.get_values(), index=L.index).astype(int) -def busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights=None, algorithm="kmeans", **algorithm_kwds): +def busmap_for_n_clusters( + n, n_clusters, solver_name, focus_weights=None, algorithm="kmeans", **algorithm_kwds +): if algorithm == "kmeans": - algorithm_kwds.setdefault('n_init', 1000) - algorithm_kwds.setdefault('max_iter', 30000) - algorithm_kwds.setdefault('tol', 1e-6) + algorithm_kwds.setdefault("n_init", 1000) + algorithm_kwds.setdefault("max_iter", 30000) + algorithm_kwds.setdefault("tol", 1e-6) n.determine_network_topology() - n_clusters = distribute_clusters(n, n_clusters, focus_weights=focus_weights, solver_name=solver_name) + n_clusters = distribute_clusters( + n, n_clusters, focus_weights=focus_weights, solver_name=solver_name + ) def reduce_network(n, buses): nr = pypsa.Network() nr.import_components_from_dataframe(buses, "Bus") - nr.import_components_from_dataframe(n.lines.loc[n.lines.bus0.isin(buses.index) & n.lines.bus1.isin(buses.index)], "Line") + nr.import_components_from_dataframe( + n.lines.loc[ + n.lines.bus0.isin(buses.index) & n.lines.bus1.isin(buses.index) + ], + "Line", + ) return nr def busmap_for_country(x): - prefix = x.name[0] + x.name[1] + ' ' + prefix = x.name[0] + x.name[1] + " " logger.debug(f"Determining busmap for country {prefix[:-1]}") if len(x) == 1: - return pd.Series(prefix + '0', index=x.index) + return pd.Series(prefix + "0", index=x.index) weight = weighting_for_country(n, x) if algorithm == "kmeans": - return prefix + busmap_by_kmeans(n, weight, n_clusters[x.name], buses_i=x.index, **algorithm_kwds) + return prefix + busmap_by_kmeans( + n, weight, n_clusters[x.name], buses_i=x.index, **algorithm_kwds + ) elif algorithm == "spectral": - return prefix + busmap_by_spectral_clustering(reduce_network(n, x), n_clusters[x.name], **algorithm_kwds) + return prefix + busmap_by_spectral_clustering( + reduce_network(n, x), n_clusters[x.name], **algorithm_kwds + ) elif algorithm == "louvain": - return prefix + busmap_by_louvain(reduce_network(n, x), n_clusters[x.name], **algorithm_kwds) + return prefix + busmap_by_louvain( + reduce_network(n, x), n_clusters[x.name], **algorithm_kwds + ) else: - raise ValueError(f"`algorithm` must be one of 'kmeans', 'spectral' or 'louvain'. Is {algorithm}.") - - return (n.buses.groupby(['country', 'sub_network'], group_keys=False) - .apply(busmap_for_country).squeeze().rename('busmap')) - - -def clustering_for_n_clusters(n, n_clusters, custom_busmap=False, aggregate_carriers=None, - line_length_factor=1.25, potential_mode='simple', solver_name="cbc", - algorithm="kmeans", extended_link_costs=0, focus_weights=None): + raise ValueError( + f"`algorithm` must be one of 'kmeans', 'spectral' or 'louvain'. Is {algorithm}." 
+ ) - if potential_mode == 'simple': + return ( + n.buses.groupby(["country", "sub_network"], group_keys=False) + .apply(busmap_for_country) + .squeeze() + .rename("busmap") + ) + + +def clustering_for_n_clusters( + n, + n_clusters, + custom_busmap=False, + aggregate_carriers=None, + line_length_factor=1.25, + potential_mode="simple", + solver_name="cbc", + algorithm="kmeans", + extended_link_costs=0, + focus_weights=None, +): + + if potential_mode == "simple": p_nom_max_strategy = np.sum - elif potential_mode == 'conservative': + elif potential_mode == "conservative": p_nom_max_strategy = np.min else: - raise AttributeError(f"potential_mode should be one of 'simple' or 'conservative' but is '{potential_mode}'") + raise AttributeError( + f"potential_mode should be one of 'simple' or 'conservative' but is '{potential_mode}'" + ) if custom_busmap: busmap = pd.read_csv(snakemake.input.custom_busmap, index_col=0, squeeze=True) busmap.index = busmap.index.astype(str) logger.info(f"Imported custom busmap from {snakemake.input.custom_busmap}") else: - busmap = busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights, algorithm) + busmap = busmap_for_n_clusters( + n, n_clusters, solver_name, focus_weights, algorithm + ) clustering = get_clustering_from_busmap( - n, busmap, + n, + busmap, bus_strategies=dict(country=_make_consense("Bus", "country")), aggregate_generators_weighted=True, aggregate_generators_carriers=aggregate_carriers, aggregate_one_ports=["Load", "StorageUnit"], line_length_factor=line_length_factor, - generator_strategies={'p_nom_max': p_nom_max_strategy, 'p_nom_min': np.sum}, - scale_link_capital_costs=False) + generator_strategies={"p_nom_max": p_nom_max_strategy, "p_nom_min": np.sum}, + scale_link_capital_costs=False, + ) if not n.links.empty: nc = clustering.network - nc.links['underwater_fraction'] = (n.links.eval('underwater_fraction * length') - .div(nc.links.length).dropna()) - nc.links['capital_cost'] = (nc.links['capital_cost'] - .add((nc.links.length - n.links.length) - .clip(lower=0).mul(extended_link_costs), - fill_value=0)) + nc.links["underwater_fraction"] = ( + n.links.eval("underwater_fraction * length").div(nc.links.length).dropna() + ) + nc.links["capital_cost"] = nc.links["capital_cost"].add( + (nc.links.length - n.links.length).clip(lower=0).mul(extended_link_costs), + fill_value=0, + ) return clustering @@ -301,21 +364,23 @@ def save_to_geojson(s, fn): if os.path.exists(fn): os.unlink(fn) df = s.reset_index() - schema = {**gpd.io.file.infer_schema(df), 'geometry': 'Unknown'} - df.to_file(fn, driver='GeoJSON', schema=schema) + schema = {**gpd.io.file.infer_schema(df), "geometry": "Unknown"} + df.to_file(fn, driver="GeoJSON", schema=schema) def cluster_regions(busmaps, input=None, output=None): - if input is None: input = snakemake.input - if output is None: output = snakemake.output + if input is None: + input = snakemake.input + if output is None: + output = snakemake.output busmap = reduce(lambda x, y: x.map(y), busmaps[1:], busmaps[0]) - for which in ('regions_onshore', 'regions_offshore'): - regions = gpd.read_file(getattr(input, which)).set_index('name') + for which in ("regions_onshore", "regions_offshore"): + regions = gpd.read_file(getattr(input, which)).set_index("name") geom_c = regions.geometry.groupby(busmap).apply(shapely.ops.cascaded_union) regions_c = gpd.GeoDataFrame(dict(geometry=geom_c)) - regions_c.index.name = 'name' + regions_c.index.name = "name" save_to_geojson(regions_c, getattr(output, which)) @@ -325,65 +390,92 @@ def 
plot_busmap_for_n_clusters(n, n_clusters, fn=None): cr = sns.color_palette("hls", len(cs)) n.plot(bus_colors=busmap.map(dict(zip(cs, cr)))) if fn is not None: - plt.savefig(fn, bbox_inches='tight') + plt.savefig(fn, bbox_inches="tight") del cs, cr if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('cluster_network', network='elec', simpl='', clusters='5') + + snakemake = mock_snakemake( + "cluster_network", network="elec", simpl="", clusters="5" + ) configure_logging(snakemake) n = pypsa.Network(snakemake.input.network) - focus_weights = snakemake.config.get('focus_weights', None) + focus_weights = snakemake.config.get("focus_weights", None) - renewable_carriers = pd.Index([tech - for tech in n.generators.carrier.unique() - if tech in snakemake.config['renewable']]) + renewable_carriers = pd.Index( + [ + tech + for tech in n.generators.carrier.unique() + if tech in snakemake.config["renewable"] + ] + ) - if snakemake.wildcards.clusters.endswith('m'): + if snakemake.wildcards.clusters.endswith("m"): n_clusters = int(snakemake.wildcards.clusters[:-1]) - aggregate_carriers = pd.Index(n.generators.carrier.unique()).difference(renewable_carriers) + aggregate_carriers = pd.Index(n.generators.carrier.unique()).difference( + renewable_carriers + ) else: n_clusters = int(snakemake.wildcards.clusters) - aggregate_carriers = None # All + aggregate_carriers = None # All if n_clusters == len(n.buses): # Fast-path if no clustering is necessary busmap = n.buses.index.to_series() linemap = n.lines.index.to_series() - clustering = pypsa.networkclustering.Clustering(n, busmap, linemap, linemap, pd.Series(dtype='O')) + clustering = pypsa.networkclustering.Clustering( + n, busmap, linemap, linemap, pd.Series(dtype="O") + ) else: - line_length_factor = snakemake.config['lines']['length_factor'] - Nyears = n.snapshot_weightings.objective.sum()/8760 - hvac_overhead_cost = (load_costs(Nyears, - tech_costs=snakemake.input.tech_costs, - config=snakemake.config['costs'], - elec_config=snakemake.config['electricity']) - .at['HVAC overhead', 'capital_cost']) + line_length_factor = snakemake.config["lines"]["length_factor"] + Nyears = n.snapshot_weightings.objective.sum() / 8760 + hvac_overhead_cost = load_costs( + Nyears, + tech_costs=snakemake.input.tech_costs, + config=snakemake.config["costs"], + elec_config=snakemake.config["electricity"], + ).at["HVAC overhead", "capital_cost"] def consense(x): v = x.iat[0] - assert ((x == v).all() or x.isnull().all()), ( - "The `potential` configuration option must agree for all renewable carriers, for now!" - ) + assert ( + x == v + ).all() or x.isnull().all(), "The `potential` configuration option must agree for all renewable carriers, for now!" 
return v - potential_mode = consense(pd.Series([snakemake.config['renewable'][tech]['potential'] - for tech in renewable_carriers])) + + potential_mode = consense( + pd.Series( + [ + snakemake.config["renewable"][tech]["potential"] + for tech in renewable_carriers + ] + ) + ) custom_busmap = snakemake.config["enable"].get("custom_busmap", False) - clustering = clustering_for_n_clusters(n, n_clusters, custom_busmap, aggregate_carriers, - line_length_factor=line_length_factor, - potential_mode=potential_mode, - solver_name=snakemake.config['solving']['solver']['name'], - extended_link_costs=hvac_overhead_cost, - focus_weights=focus_weights) + clustering = clustering_for_n_clusters( + n, + n_clusters, + custom_busmap, + aggregate_carriers, + line_length_factor=line_length_factor, + potential_mode=potential_mode, + solver_name=snakemake.config["solving"]["solver"]["name"], + extended_link_costs=hvac_overhead_cost, + focus_weights=focus_weights, + ) update_p_nom_max(n) - + clustering.network.export_to_netcdf(snakemake.output.network) - for attr in ('busmap', 'linemap'): #also available: linemap_positive, linemap_negative + for attr in ( + "busmap", + "linemap", + ): # also available: linemap_positive, linemap_negative getattr(clustering, attr).to_csv(snakemake.output[attr]) cluster_regions((clustering.busmap,)) diff --git a/scripts/make_summary.py b/scripts/make_summary.py index cff5318c6..fe63d4edc 100644 --- a/scripts/make_summary.py +++ b/scripts/make_summary.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -66,7 +67,7 @@ logger = logging.getLogger(__name__) -opt_name = {"Store": "e", "Line" : "s", "Transformer" : "s"} +opt_name = {"Store": "e", "Line": "s", "Transformer": "s"} def _add_indexed_rows(df, raw_index): @@ -81,99 +82,147 @@ def assign_carriers(n): if "carrier" not in n.loads: n.loads["carrier"] = "electricity" - for carrier in ["transport","heat","urban heat"]: - n.loads.loc[n.loads.index.str.contains(carrier),"carrier"] = carrier + for carrier in ["transport", "heat", "urban heat"]: + n.loads.loc[n.loads.index.str.contains(carrier), "carrier"] = carrier - n.storage_units['carrier'].replace({'hydro': 'hydro+PHS', 'PHS': 'hydro+PHS'}, inplace=True) + n.storage_units["carrier"].replace( + {"hydro": "hydro+PHS", "PHS": "hydro+PHS"}, inplace=True + ) if "carrier" not in n.lines: n.lines["carrier"] = "AC" n.lines["carrier"].replace({"AC": "lines"}, inplace=True) - if n.links.empty: n.links["carrier"] = pd.Series(dtype=str) + if n.links.empty: + n.links["carrier"] = pd.Series(dtype=str) n.links["carrier"].replace({"DC": "lines"}, inplace=True) - if "EU gas store" in n.stores.index and n.stores.loc["EU gas Store","carrier"] == "": - n.stores.loc["EU gas Store","carrier"] = "gas Store" + if ( + "EU gas store" in n.stores.index + and n.stores.loc["EU gas Store", "carrier"] == "" + ): + n.stores.loc["EU gas Store", "carrier"] = "gas Store" def calculate_costs(n, label, costs): - for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load"}): - capital_costs = c.df.capital_cost*c.df[opt_name.get(c.name,"p") + "_nom_opt"] + for c in n.iterate_components( + n.branch_components | n.controllable_one_port_components ^ {"Load"} + ): + capital_costs = c.df.capital_cost * c.df[opt_name.get(c.name, "p") + "_nom_opt"] capital_costs_grouped = capital_costs.groupby(c.df.carrier).sum() # Index tuple(s) indicating the newly to-be-added row(s) - raw_index = 
tuple([[c.list_name],["capital"],list(capital_costs_grouped.index)]) + raw_index = tuple( + [[c.list_name], ["capital"], list(capital_costs_grouped.index)] + ) costs = _add_indexed_rows(costs, raw_index) - costs.loc[idx[raw_index],label] = capital_costs_grouped.values + costs.loc[idx[raw_index], label] = capital_costs_grouped.values if c.name == "Link": - p = c.pnl.p0.multiply(n.snapshot_weightings.generators,axis=0).sum() + p = c.pnl.p0.multiply(n.snapshot_weightings.generators, axis=0).sum() elif c.name == "Line": continue elif c.name == "StorageUnit": - p_all = c.pnl.p.multiply(n.snapshot_weightings.generators,axis=0) - p_all[p_all < 0.] = 0. + p_all = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0) + p_all[p_all < 0.0] = 0.0 p = p_all.sum() else: - p = c.pnl.p.multiply(n.snapshot_weightings.generators,axis=0).sum() + p = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0).sum() - marginal_costs = p*c.df.marginal_cost + marginal_costs = p * c.df.marginal_cost marginal_costs_grouped = marginal_costs.groupby(c.df.carrier).sum() - costs = costs.reindex(costs.index.union(pd.MultiIndex.from_product([[c.list_name],["marginal"],marginal_costs_grouped.index]))) + costs = costs.reindex( + costs.index.union( + pd.MultiIndex.from_product( + [[c.list_name], ["marginal"], marginal_costs_grouped.index] + ) + ) + ) - costs.loc[idx[c.list_name,"marginal",list(marginal_costs_grouped.index)],label] = marginal_costs_grouped.values + costs.loc[ + idx[c.list_name, "marginal", list(marginal_costs_grouped.index)], label + ] = marginal_costs_grouped.values return costs + def calculate_curtailment(n, label, curtailment): - avail = n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt).sum().groupby(n.generators.carrier).sum() + avail = ( + n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt) + .sum() + .groupby(n.generators.carrier) + .sum() + ) used = n.generators_t.p.sum().groupby(n.generators.carrier).sum() - curtailment[label] = (((avail - used)/avail)*100).round(3) + curtailment[label] = (((avail - used) / avail) * 100).round(3) return curtailment -def calculate_energy(n, label, energy): - for c in n.iterate_components(n.one_port_components|n.branch_components): +def calculate_energy(n, label, energy): - if c.name in {'Generator', 'Load', 'ShuntImpedance'}: - c_energies = c.pnl.p.multiply(n.snapshot_weightings.generators,axis=0).sum().multiply(c.df.sign).groupby(c.df.carrier).sum() - elif c.name in {'StorageUnit', 'Store'}: - c_energies = c.pnl.p.multiply(n.snapshot_weightings.stores,axis=0).sum().multiply(c.df.sign).groupby(c.df.carrier).sum() + for c in n.iterate_components(n.one_port_components | n.branch_components): + + if c.name in {"Generator", "Load", "ShuntImpedance"}: + c_energies = ( + c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0) + .sum() + .multiply(c.df.sign) + .groupby(c.df.carrier) + .sum() + ) + elif c.name in {"StorageUnit", "Store"}: + c_energies = ( + c.pnl.p.multiply(n.snapshot_weightings.stores, axis=0) + .sum() + .multiply(c.df.sign) + .groupby(c.df.carrier) + .sum() + ) else: - c_energies = (-c.pnl.p1.multiply(n.snapshot_weightings.generators,axis=0).sum() - c.pnl.p0.multiply(n.snapshot_weightings.generators,axis=0).sum()).groupby(c.df.carrier).sum() + c_energies = ( + ( + -c.pnl.p1.multiply(n.snapshot_weightings.generators, axis=0).sum() + - c.pnl.p0.multiply(n.snapshot_weightings.generators, axis=0).sum() + ) + .groupby(c.df.carrier) + .sum() + ) energy = include_in_summary(energy, [c.list_name], label, c_energies) return energy + def 
include_in_summary(summary, multiindexprefix, label, item): # Index tuple(s) indicating the newly to-be-added row(s) - raw_index = tuple([multiindexprefix,list(item.index)]) + raw_index = tuple([multiindexprefix, list(item.index)]) summary = _add_indexed_rows(summary, raw_index) summary.loc[idx[raw_index], label] = item.values return summary -def calculate_capacity(n,label,capacity): + +def calculate_capacity(n, label, capacity): for c in n.iterate_components(n.one_port_components): - if 'p_nom_opt' in c.df.columns: - c_capacities = abs(c.df.p_nom_opt.multiply(c.df.sign)).groupby(c.df.carrier).sum() + if "p_nom_opt" in c.df.columns: + c_capacities = ( + abs(c.df.p_nom_opt.multiply(c.df.sign)).groupby(c.df.carrier).sum() + ) capacity = include_in_summary(capacity, [c.list_name], label, c_capacities) for c in n.iterate_components(n.passive_branch_components): - c_capacities = c.df['s_nom_opt'].groupby(c.df.carrier).sum() + c_capacities = c.df["s_nom_opt"].groupby(c.df.carrier).sum() capacity = include_in_summary(capacity, [c.list_name], label, c_capacities) for c in n.iterate_components(n.controllable_branch_components): @@ -182,6 +231,7 @@ def calculate_capacity(n,label,capacity): return capacity + def calculate_supply(n, label, supply): """calculate the max dispatch of each component at the buses where the loads are attached""" @@ -191,7 +241,7 @@ def calculate_supply(n, label, supply): buses = n.loads.bus[n.loads.carrier == i].values - bus_map = pd.Series(False,index=n.buses.index) + bus_map = pd.Series(False, index=n.buses.index) bus_map.loc[buses] = True @@ -202,29 +252,40 @@ def calculate_supply(n, label, supply): if len(items) == 0 or c.pnl.p.empty: continue - s = c.pnl.p[items].max().multiply(c.df.loc[items,'sign']).groupby(c.df.loc[items,'carrier']).sum() + s = ( + c.pnl.p[items] + .max() + .multiply(c.df.loc[items, "sign"]) + .groupby(c.df.loc[items, "carrier"]) + .sum() + ) # Index tuple(s) indicating the newly to-be-added row(s) - raw_index = tuple([[i],[c.list_name],list(s.index)]) + raw_index = tuple([[i], [c.list_name], list(s.index)]) supply = _add_indexed_rows(supply, raw_index) - supply.loc[idx[raw_index],label] = s.values - + supply.loc[idx[raw_index], label] = s.values for c in n.iterate_components(n.branch_components): - for end in ["0","1"]: + for end in ["0", "1"]: items = c.df.index[c.df["bus" + end].map(bus_map)] - if len(items) == 0 or c.pnl["p"+end].empty: + if len(items) == 0 or c.pnl["p" + end].empty: continue - #lots of sign compensation for direction and to do maximums - s = (-1)**(1-int(end))*((-1)**int(end)*c.pnl["p"+end][items]).max().groupby(c.df.loc[items,'carrier']).sum() + # lots of sign compensation for direction and to do maximums + s = (-1) ** (1 - int(end)) * ( + (-1) ** int(end) * c.pnl["p" + end][items] + ).max().groupby(c.df.loc[items, "carrier"]).sum() - supply = supply.reindex(supply.index.union(pd.MultiIndex.from_product([[i],[c.list_name],s.index]))) - supply.loc[idx[i,c.list_name,list(s.index)],label] = s.values + supply = supply.reindex( + supply.index.union( + pd.MultiIndex.from_product([[i], [c.list_name], s.index]) + ) + ) + supply.loc[idx[i, c.list_name, list(s.index)], label] = s.values return supply @@ -238,7 +299,7 @@ def calculate_supply_energy(n, label, supply_energy): buses = n.loads.bus[n.loads.carrier == i].values - bus_map = pd.Series(False,index=n.buses.index) + bus_map = pd.Series(False, index=n.buses.index) bus_map.loc[buses] = True @@ -249,55 +310,85 @@ def calculate_supply_energy(n, label, supply_energy): if len(items) == 0 
or c.pnl.p.empty: continue - s = c.pnl.p[items].sum().multiply(c.df.loc[items,'sign']).groupby(c.df.loc[items,'carrier']).sum() + s = ( + c.pnl.p[items] + .sum() + .multiply(c.df.loc[items, "sign"]) + .groupby(c.df.loc[items, "carrier"]) + .sum() + ) # Index tuple(s) indicating the newly to-be-added row(s) - raw_index = tuple([[i],[c.list_name],list(s.index)]) + raw_index = tuple([[i], [c.list_name], list(s.index)]) supply_energy = _add_indexed_rows(supply_energy, raw_index) - supply_energy.loc[idx[raw_index],label] = s.values - + supply_energy.loc[idx[raw_index], label] = s.values for c in n.iterate_components(n.branch_components): - for end in ["0","1"]: + for end in ["0", "1"]: items = c.df.index[c.df["bus" + end].map(bus_map)] - if len(items) == 0 or c.pnl['p' + end].empty: + if len(items) == 0 or c.pnl["p" + end].empty: continue - s = (-1)*c.pnl["p"+end][items].sum().groupby(c.df.loc[items,'carrier']).sum() + s = (-1) * c.pnl["p" + end][items].sum().groupby( + c.df.loc[items, "carrier"] + ).sum() - supply_energy = supply_energy.reindex(supply_energy.index.union(pd.MultiIndex.from_product([[i],[c.list_name],s.index]))) - supply_energy.loc[idx[i,c.list_name,list(s.index)],label] = s.values + supply_energy = supply_energy.reindex( + supply_energy.index.union( + pd.MultiIndex.from_product([[i], [c.list_name], s.index]) + ) + ) + supply_energy.loc[idx[i, c.list_name, list(s.index)], label] = s.values return supply_energy -def calculate_metrics(n,label,metrics): - - metrics = metrics.reindex(metrics.index.union(pd.Index(["line_volume","line_volume_limit","line_volume_AC","line_volume_DC","line_volume_shadow","co2_shadow"]))) - - metrics.at["line_volume_DC",label] = (n.links.length*n.links.p_nom_opt)[n.links.carrier == "DC"].sum() - metrics.at["line_volume_AC",label] = (n.lines.length*n.lines.s_nom_opt).sum() - metrics.at["line_volume",label] = metrics.loc[["line_volume_AC","line_volume_DC"],label].sum() - - if hasattr(n,"line_volume_limit"): - metrics.at["line_volume_limit",label] = n.line_volume_limit - - if hasattr(n,"line_volume_limit_dual"): - metrics.at["line_volume_shadow",label] = n.line_volume_limit_dual +def calculate_metrics(n, label, metrics): + + metrics = metrics.reindex( + metrics.index.union( + pd.Index( + [ + "line_volume", + "line_volume_limit", + "line_volume_AC", + "line_volume_DC", + "line_volume_shadow", + "co2_shadow", + ] + ) + ) + ) + + metrics.at["line_volume_DC", label] = (n.links.length * n.links.p_nom_opt)[ + n.links.carrier == "DC" + ].sum() + metrics.at["line_volume_AC", label] = (n.lines.length * n.lines.s_nom_opt).sum() + metrics.at["line_volume", label] = metrics.loc[ + ["line_volume_AC", "line_volume_DC"], label + ].sum() + + if hasattr(n, "line_volume_limit"): + metrics.at["line_volume_limit", label] = n.line_volume_limit + + if hasattr(n, "line_volume_limit_dual"): + metrics.at["line_volume_shadow", label] = n.line_volume_limit_dual if "CO2Limit" in n.global_constraints.index: - metrics.at["co2_shadow",label] = n.global_constraints.at["CO2Limit","mu"] + metrics.at["co2_shadow", label] = n.global_constraints.at["CO2Limit", "mu"] return metrics -def calculate_prices(n,label,prices): +def calculate_prices(n, label, prices): - bus_type = pd.Series(n.buses.index.str[3:],n.buses.index).replace("","electricity") + bus_type = pd.Series(n.buses.index.str[3:], n.buses.index).replace( + "", "electricity" + ) prices = prices.reindex(prices.index.union(bus_type.value_counts().index)) @@ -307,19 +398,38 @@ def calculate_prices(n,label,prices): return prices -def 
calculate_weighted_prices(n,label,weighted_prices): +def calculate_weighted_prices(n, label, weighted_prices): logger.warning("Weighted prices don't include storage units as loads") - weighted_prices = weighted_prices.reindex(pd.Index(["electricity","heat","space heat","urban heat","space urban heat","gas","H2"])) - - link_loads = {"electricity" : ["heat pump", "resistive heater", "battery charger", "H2 Electrolysis"], - "heat" : ["water tanks charger"], - "urban heat" : ["water tanks charger"], - "space heat" : [], - "space urban heat" : [], - "gas" : ["OCGT","gas boiler","CHP electric","CHP heat"], - "H2" : ["Sabatier", "H2 Fuel Cell"]} + weighted_prices = weighted_prices.reindex( + pd.Index( + [ + "electricity", + "heat", + "space heat", + "urban heat", + "space urban heat", + "gas", + "H2", + ] + ) + ) + + link_loads = { + "electricity": [ + "heat pump", + "resistive heater", + "battery charger", + "H2 Electrolysis", + ], + "heat": ["water tanks charger"], + "urban heat": ["water tanks charger"], + "space heat": [], + "space urban heat": [], + "gas": ["OCGT", "gas boiler", "CHP electric", "CHP heat"], + "H2": ["Sabatier", "H2 Fuel Cell"], + } for carrier in link_loads: @@ -328,64 +438,78 @@ def calculate_weighted_prices(n,label,weighted_prices): elif carrier[:5] == "space": suffix = carrier[5:] else: - suffix = " " + carrier + suffix = " " + carrier buses = n.buses.index[n.buses.index.str[2:] == suffix] if buses.empty: continue - if carrier in ["H2","gas"]: - load = pd.DataFrame(index=n.snapshots,columns=buses,data=0.) + if carrier in ["H2", "gas"]: + load = pd.DataFrame(index=n.snapshots, columns=buses, data=0.0) elif carrier[:5] == "space": - load = heat_demand_df[buses.str[:2]].rename(columns=lambda i: str(i)+suffix) + load = heat_demand_df[buses.str[:2]].rename( + columns=lambda i: str(i) + suffix + ) else: load = n.loads_t.p_set[buses] - for tech in link_loads[carrier]: - names = n.links.index[n.links.index.to_series().str[-len(tech):] == tech] + names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech] if names.empty: continue - load += n.links_t.p0[names].groupby(n.links.loc[names,"bus0"],axis=1).sum(axis=1) + load += ( + n.links_t.p0[names] + .groupby(n.links.loc[names, "bus0"], axis=1) + .sum(axis=1) + ) # Add H2 Store when charging if carrier == "H2": - stores = n.stores_t.p[buses+ " Store"].groupby(n.stores.loc[buses+ " Store","bus"],axis=1).sum(axis=1) - stores[stores > 0.] = 0. 
+ stores = ( + n.stores_t.p[buses + " Store"] + .groupby(n.stores.loc[buses + " Store", "bus"], axis=1) + .sum(axis=1) + ) + stores[stores > 0.0] = 0.0 load += -stores - weighted_prices.loc[carrier,label] = (load*n.buses_t.marginal_price[buses]).sum().sum()/load.sum().sum() + weighted_prices.loc[carrier, label] = ( + load * n.buses_t.marginal_price[buses] + ).sum().sum() / load.sum().sum() if carrier[:5] == "space": - print(load*n.buses_t.marginal_price[buses]) + print(load * n.buses_t.marginal_price[buses]) return weighted_prices -outputs = ["costs", - "curtailment", - "energy", - "capacity", - "supply", - "supply_energy", - "prices", - "weighted_prices", - "metrics", - ] +outputs = [ + "costs", + "curtailment", + "energy", + "capacity", + "supply", + "supply_energy", + "prices", + "weighted_prices", + "metrics", +] -def make_summaries(networks_dict, country='all'): +def make_summaries(networks_dict, country="all"): - columns = pd.MultiIndex.from_tuples(networks_dict.keys(),names=["simpl","clusters","ll","opts"]) + columns = pd.MultiIndex.from_tuples( + networks_dict.keys(), names=["simpl", "clusters", "ll", "opts"] + ) dfs = {} for output in outputs: - dfs[output] = pd.DataFrame(columns=columns,dtype=float) + dfs[output] = pd.DataFrame(columns=columns, dtype=float) for label, filename in networks_dict.items(): print(label, filename) @@ -399,12 +523,16 @@ def make_summaries(networks_dict, country='all'): logger.warning("Skipping {filename}".format(filename=filename)) continue - if country != 'all': + if country != "all": n = n[n.buses.country == country] - Nyears = n.snapshot_weightings.objective.sum() / 8760. - costs = load_costs(Nyears, snakemake.input[0], - snakemake.config['costs'], snakemake.config['electricity']) + Nyears = n.snapshot_weightings.objective.sum() / 8760.0 + costs = load_costs( + Nyears, + snakemake.input[0], + snakemake.config["costs"], + snakemake.config["electricity"], + ) update_transmission_costs(n, costs, simple_hvdc_costs=False) assign_carriers(n) @@ -423,13 +551,21 @@ def to_csv(dfs): if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('make_summary', network='elec', simpl='', - clusters='5', ll='copt', opts='Co2L-24H', country='all') - network_dir = os.path.join('..', 'results', 'networks') + + snakemake = mock_snakemake( + "make_summary", + network="elec", + simpl="", + clusters="5", + ll="copt", + opts="Co2L-24H", + country="all", + ) + network_dir = os.path.join("..", "results", "networks") else: - network_dir = os.path.join('results', 'networks') + network_dir = os.path.join("results", "networks") configure_logging(snakemake) def expand_from_wildcard(key): @@ -443,13 +579,15 @@ def expand_from_wildcard(key): else: ll = [snakemake.wildcards.ll] - networks_dict = {(simpl,clusters,l,opts) : - os.path.join(network_dir, f'elec_s{simpl}_' - f'{clusters}_ec_l{l}_{opts}.nc') - for simpl in expand_from_wildcard("simpl") - for clusters in expand_from_wildcard("clusters") - for l in ll - for opts in expand_from_wildcard("opts")} + networks_dict = { + (simpl, clusters, l, opts): os.path.join( + network_dir, f"elec_s{simpl}_" f"{clusters}_ec_l{l}_{opts}.nc" + ) + for simpl in expand_from_wildcard("simpl") + for clusters in expand_from_wildcard("clusters") + for l in ll + for opts in expand_from_wildcard("opts") + } dfs = make_summaries(networks_dict, country=snakemake.wildcards.country) diff --git a/scripts/plot_network.py b/scripts/plot_network.py index 
456bf50fe..eb409579e 100755 --- a/scripts/plot_network.py +++ b/scripts/plot_network.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -20,8 +21,12 @@ """ import logging -from _helpers import (load_network_for_plots, aggregate_p, aggregate_costs, - configure_logging) +from _helpers import ( + load_network_for_plots, + aggregate_p, + aggregate_costs, + configure_logging, +) import pandas as pd import numpy as np @@ -31,6 +36,7 @@ import matplotlib as mpl from matplotlib.patches import Circle, Ellipse from matplotlib.legend_handler import HandlerPatch + to_rgba = mpl.colors.colorConverter.to_rgba logger = logging.getLogger(__name__) @@ -38,239 +44,352 @@ def make_handler_map_to_scale_circles_as_in(ax, dont_resize_actively=False): fig = ax.get_figure() + def axes2pt(): - return np.diff(ax.transData.transform([(0,0), (1,1)]), axis=0)[0] * (72./fig.dpi) + return np.diff(ax.transData.transform([(0, 0), (1, 1)]), axis=0)[0] * ( + 72.0 / fig.dpi + ) ellipses = [] if not dont_resize_actively: + def update_width_height(event): dist = axes2pt() - for e, radius in ellipses: e.width, e.height = 2. * radius * dist - fig.canvas.mpl_connect('resize_event', update_width_height) - ax.callbacks.connect('xlim_changed', update_width_height) - ax.callbacks.connect('ylim_changed', update_width_height) - - def legend_circle_handler(legend, orig_handle, xdescent, ydescent, - width, height, fontsize): - w, h = 2. * orig_handle.get_radius() * axes2pt() - e = Ellipse(xy=(0.5*width-0.5*xdescent, 0.5*height-0.5*ydescent), width=w, height=w) + for e, radius in ellipses: + e.width, e.height = 2.0 * radius * dist + + fig.canvas.mpl_connect("resize_event", update_width_height) + ax.callbacks.connect("xlim_changed", update_width_height) + ax.callbacks.connect("ylim_changed", update_width_height) + + def legend_circle_handler( + legend, orig_handle, xdescent, ydescent, width, height, fontsize + ): + w, h = 2.0 * orig_handle.get_radius() * axes2pt() + e = Ellipse( + xy=(0.5 * width - 0.5 * xdescent, 0.5 * height - 0.5 * ydescent), + width=w, + height=w, + ) ellipses.append((e, orig_handle.get_radius())) return e + return {Circle: HandlerPatch(patch_func=legend_circle_handler)} def make_legend_circles_for(sizes, scale=1.0, **kw): - return [Circle((0,0), radius=(s/scale)**0.5, **kw) for s in sizes] + return [Circle((0, 0), radius=(s / scale) ** 0.5, **kw) for s in sizes] def set_plot_style(): - plt.style.use(['classic', 'seaborn-white', - {'axes.grid': False, 'grid.linestyle': '--', 'grid.color': u'0.6', - 'hatch.color': 'white', - 'patch.linewidth': 0.5, - 'font.size': 12, - 'legend.fontsize': 'medium', - 'lines.linewidth': 1.5, - 'pdf.fonttype': 42, - }]) - - -def plot_map(n, ax=None, attribute='p_nom', opts={}): + plt.style.use( + [ + "classic", + "seaborn-white", + { + "axes.grid": False, + "grid.linestyle": "--", + "grid.color": u"0.6", + "hatch.color": "white", + "patch.linewidth": 0.5, + "font.size": 12, + "legend.fontsize": "medium", + "lines.linewidth": 1.5, + "pdf.fonttype": 42, + }, + ] + ) + + +def plot_map(n, ax=None, attribute="p_nom", opts={}): if ax is None: ax = plt.gca() ## DATA - line_colors = {'cur': "purple", - 'exp': mpl.colors.rgb2hex(to_rgba("red", 0.7), True)} - tech_colors = opts['tech_colors'] + line_colors = { + "cur": "purple", + "exp": mpl.colors.rgb2hex(to_rgba("red", 0.7), True), + } + tech_colors = opts["tech_colors"] - if attribute == 'p_nom': + if attribute == "p_nom": # bus_sizes = 
n.generators_t.p.sum().loc[n.generators.carrier == "load"].groupby(n.generators.bus).sum() - bus_sizes = pd.concat((n.generators.query('carrier != "load"').groupby(['bus', 'carrier']).p_nom_opt.sum(), - n.storage_units.groupby(['bus', 'carrier']).p_nom_opt.sum())) + bus_sizes = pd.concat( + ( + n.generators.query('carrier != "load"') + .groupby(["bus", "carrier"]) + .p_nom_opt.sum(), + n.storage_units.groupby(["bus", "carrier"]).p_nom_opt.sum(), + ) + ) line_widths_exp = n.lines.s_nom_opt line_widths_cur = n.lines.s_nom_min link_widths_exp = n.links.p_nom_opt link_widths_cur = n.links.p_nom_min else: - raise 'plotting of {} has not been implemented yet'.format(attribute) - + raise "plotting of {} has not been implemented yet".format(attribute) - line_colors_with_alpha = \ - ((line_widths_cur / n.lines.s_nom > 1e-3) - .map({True: line_colors['cur'], False: to_rgba(line_colors['cur'], 0.)})) - link_colors_with_alpha = \ - ((link_widths_cur / n.links.p_nom > 1e-3) - .map({True: line_colors['cur'], False: to_rgba(line_colors['cur'], 0.)})) - + line_colors_with_alpha = (line_widths_cur / n.lines.s_nom > 1e-3).map( + {True: line_colors["cur"], False: to_rgba(line_colors["cur"], 0.0)} + ) + link_colors_with_alpha = (link_widths_cur / n.links.p_nom > 1e-3).map( + {True: line_colors["cur"], False: to_rgba(line_colors["cur"], 0.0)} + ) ## FORMAT - linewidth_factor = opts['map'][attribute]['linewidth_factor'] - bus_size_factor = opts['map'][attribute]['bus_size_factor'] + linewidth_factor = opts["map"][attribute]["linewidth_factor"] + bus_size_factor = opts["map"][attribute]["bus_size_factor"] ## PLOT - n.plot(line_widths=line_widths_exp/linewidth_factor, - link_widths=link_widths_exp/linewidth_factor, - line_colors=line_colors['exp'], - link_colors=line_colors['exp'], - bus_sizes=bus_sizes/bus_size_factor, - bus_colors=tech_colors, - boundaries=map_boundaries, - color_geomap=True, geomap=True, - ax=ax) - n.plot(line_widths=line_widths_cur/linewidth_factor, - link_widths=link_widths_cur/linewidth_factor, - line_colors=line_colors_with_alpha, - link_colors=link_colors_with_alpha, - bus_sizes=0, - boundaries=map_boundaries, - color_geomap=True, geomap=False, - ax=ax) - ax.set_aspect('equal') - ax.axis('off') + n.plot( + line_widths=line_widths_exp / linewidth_factor, + link_widths=link_widths_exp / linewidth_factor, + line_colors=line_colors["exp"], + link_colors=line_colors["exp"], + bus_sizes=bus_sizes / bus_size_factor, + bus_colors=tech_colors, + boundaries=map_boundaries, + color_geomap=True, + geomap=True, + ax=ax, + ) + n.plot( + line_widths=line_widths_cur / linewidth_factor, + link_widths=link_widths_cur / linewidth_factor, + line_colors=line_colors_with_alpha, + link_colors=link_colors_with_alpha, + bus_sizes=0, + boundaries=map_boundaries, + color_geomap=True, + geomap=False, + ax=ax, + ) + ax.set_aspect("equal") + ax.axis("off") # Rasterize basemap # TODO : Check if this also works with cartopy - for c in ax.collections[:2]: c.set_rasterized(True) + for c in ax.collections[:2]: + c.set_rasterized(True) # LEGEND handles = [] labels = [] for s in (10, 1): - handles.append(plt.Line2D([0],[0],color=line_colors['exp'], - linewidth=s*1e3/linewidth_factor)) + handles.append( + plt.Line2D( + [0], [0], color=line_colors["exp"], linewidth=s * 1e3 / linewidth_factor + ) + ) labels.append("{} GW".format(s)) - l1_1 = ax.legend(handles, labels, - loc="upper left", bbox_to_anchor=(0.24, 1.01), - frameon=False, - labelspacing=0.8, handletextpad=1.5, - title='Transmission Exp./Exist. 
') + l1_1 = ax.legend( + handles, + labels, + loc="upper left", + bbox_to_anchor=(0.24, 1.01), + frameon=False, + labelspacing=0.8, + handletextpad=1.5, + title="Transmission Exp./Exist. ", + ) ax.add_artist(l1_1) handles = [] labels = [] for s in (10, 5): - handles.append(plt.Line2D([0],[0],color=line_colors['cur'], - linewidth=s*1e3/linewidth_factor)) + handles.append( + plt.Line2D( + [0], [0], color=line_colors["cur"], linewidth=s * 1e3 / linewidth_factor + ) + ) labels.append("/") - l1_2 = ax.legend(handles, labels, - loc="upper left", bbox_to_anchor=(0.26, 1.01), - frameon=False, - labelspacing=0.8, handletextpad=0.5, - title=' ') + l1_2 = ax.legend( + handles, + labels, + loc="upper left", + bbox_to_anchor=(0.26, 1.01), + frameon=False, + labelspacing=0.8, + handletextpad=0.5, + title=" ", + ) ax.add_artist(l1_2) - handles = make_legend_circles_for([10e3, 5e3, 1e3], scale=bus_size_factor, facecolor="w") + handles = make_legend_circles_for( + [10e3, 5e3, 1e3], scale=bus_size_factor, facecolor="w" + ) labels = ["{} GW".format(s) for s in (10, 5, 3)] - l2 = ax.legend(handles, labels, - loc="upper left", bbox_to_anchor=(0.01, 1.01), - frameon=False, labelspacing=1.0, - title='Generation', - handler_map=make_handler_map_to_scale_circles_as_in(ax)) + l2 = ax.legend( + handles, + labels, + loc="upper left", + bbox_to_anchor=(0.01, 1.01), + frameon=False, + labelspacing=1.0, + title="Generation", + handler_map=make_handler_map_to_scale_circles_as_in(ax), + ) ax.add_artist(l2) - techs = (bus_sizes.index.levels[1]).intersection(pd.Index(opts['vre_techs'] + opts['conv_techs'] + opts['storage_techs'])) + techs = (bus_sizes.index.levels[1]).intersection( + pd.Index(opts["vre_techs"] + opts["conv_techs"] + opts["storage_techs"]) + ) handles = [] labels = [] for t in techs: - handles.append(plt.Line2D([0], [0], color=tech_colors[t], marker='o', markersize=8, linewidth=0)) - labels.append(opts['nice_names'].get(t, t)) - l3 = ax.legend(handles, labels, loc="upper center", bbox_to_anchor=(0.5, -0.), # bbox_to_anchor=(0.72, -0.05), - handletextpad=0., columnspacing=0.5, ncol=4, title='Technology') + handles.append( + plt.Line2D( + [0], [0], color=tech_colors[t], marker="o", markersize=8, linewidth=0 + ) + ) + labels.append(opts["nice_names"].get(t, t)) + l3 = ax.legend( + handles, + labels, + loc="upper center", + bbox_to_anchor=(0.5, -0.0), # bbox_to_anchor=(0.72, -0.05), + handletextpad=0.0, + columnspacing=0.5, + ncol=4, + title="Technology", + ) return fig def plot_total_energy_pie(n, ax=None): - if ax is None: ax = plt.gca() + if ax is None: + ax = plt.gca() - ax.set_title('Energy per technology', fontdict=dict(fontsize="medium")) + ax.set_title("Energy per technology", fontdict=dict(fontsize="medium")) - e_primary = aggregate_p(n).drop('load', errors='ignore').loc[lambda s: s>0] + e_primary = aggregate_p(n).drop("load", errors="ignore").loc[lambda s: s > 0] - patches, texts, autotexts = ax.pie(e_primary, + patches, texts, autotexts = ax.pie( + e_primary, startangle=90, - labels = e_primary.rename(opts['nice_names']).index, - autopct='%.0f%%', + labels=e_primary.rename(opts["nice_names"]).index, + autopct="%.0f%%", shadow=False, - colors = [opts['tech_colors'][tech] for tech in e_primary.index]) + colors=[opts["tech_colors"][tech] for tech in e_primary.index], + ) for t1, t2, i in zip(texts, autotexts, e_primary.index): if e_primary.at[i] < 0.04 * e_primary.sum(): t1.remove() t2.remove() + def plot_total_cost_bar(n, ax=None): - if ax is None: ax = plt.gca() + if ax is None: + ax = plt.gca() 
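plot_total_energy_pie above removes the text and percentage labels of slices that contribute less than 4 % of the total, so very small technologies do not clutter the chart. A self-contained sketch of the same pattern with made-up energy shares:

import matplotlib.pyplot as plt
import pandas as pd

# hypothetical primary energy per technology (TWh)
e_primary = pd.Series({"onwind": 120.0, "solar": 80.0, "OCGT": 3.0})

fig, ax = plt.subplots()
patches, texts, autotexts = ax.pie(
    e_primary, labels=e_primary.index, autopct="%.0f%%", startangle=90
)

# drop labels of slices below 4 % of the total, as in plot_total_energy_pie
for t1, t2, i in zip(texts, autotexts, e_primary.index):
    if e_primary.at[i] < 0.04 * e_primary.sum():
        t1.remove()
        t2.remove()

fig.savefig("energy_pie.png")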
total_load = (n.snapshot_weightings.generators * n.loads_t.p.sum(axis=1)).sum() - tech_colors = opts['tech_colors'] + tech_colors = opts["tech_colors"] def split_costs(n): costs = aggregate_costs(n).reset_index(level=0, drop=True) - costs_ex = aggregate_costs(n, existing_only=True).reset_index(level=0, drop=True) - return (costs['capital'].add(costs['marginal'], fill_value=0.), - costs_ex['capital'], costs['capital'] - costs_ex['capital'], costs['marginal']) + costs_ex = aggregate_costs(n, existing_only=True).reset_index( + level=0, drop=True + ) + return ( + costs["capital"].add(costs["marginal"], fill_value=0.0), + costs_ex["capital"], + costs["capital"] - costs_ex["capital"], + costs["marginal"], + ) costs, costs_cap_ex, costs_cap_new, costs_marg = split_costs(n) - costs_graph = pd.DataFrame(dict(a=costs.drop('load', errors='ignore')), - index=['AC-AC', 'AC line', 'onwind', 'offwind-ac', - 'offwind-dc', 'solar', 'OCGT','CCGT', 'battery', 'H2']).dropna() - bottom = np.array([0., 0.]) + costs_graph = pd.DataFrame( + dict(a=costs.drop("load", errors="ignore")), + index=[ + "AC-AC", + "AC line", + "onwind", + "offwind-ac", + "offwind-dc", + "solar", + "OCGT", + "CCGT", + "battery", + "H2", + ], + ).dropna() + bottom = np.array([0.0, 0.0]) texts = [] - for i,ind in enumerate(costs_graph.index): - data = np.asarray(costs_graph.loc[ind])/total_load - ax.bar([0.5], data, bottom=bottom, color=tech_colors[ind], - width=0.7, zorder=-1) + for i, ind in enumerate(costs_graph.index): + data = np.asarray(costs_graph.loc[ind]) / total_load + ax.bar([0.5], data, bottom=bottom, color=tech_colors[ind], width=0.7, zorder=-1) bottom_sub = bottom - bottom = bottom+data + bottom = bottom + data - if ind in opts['conv_techs'] + ['AC line']: + if ind in opts["conv_techs"] + ["AC line"]: for c in [costs_cap_ex, costs_marg]: if ind in c: - data_sub = np.asarray([c.loc[ind]])/total_load - ax.bar([0.5], data_sub, linewidth=0, - bottom=bottom_sub, color=tech_colors[ind], - width=0.7, zorder=-1, alpha=0.8) + data_sub = np.asarray([c.loc[ind]]) / total_load + ax.bar( + [0.5], + data_sub, + linewidth=0, + bottom=bottom_sub, + color=tech_colors[ind], + width=0.7, + zorder=-1, + alpha=0.8, + ) bottom_sub += data_sub if abs(data[-1]) < 5: continue - text = ax.text(1.1,(bottom-0.5*data)[-1]-3,opts['nice_names'].get(ind,ind)) + text = ax.text( + 1.1, (bottom - 0.5 * data)[-1] - 3, opts["nice_names"].get(ind, ind) + ) texts.append(text) ax.set_ylabel("Average system cost [Eur/MWh]") - ax.set_ylim([0, opts.get('costs_max', 80)]) + ax.set_ylim([0, opts.get("costs_max", 80)]) ax.set_xlim([0, 1]) ax.set_xticklabels([]) - ax.grid(True, axis="y", color='k', linestyle='dotted') + ax.grid(True, axis="y", color="k", linestyle="dotted") if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('plot_network', network='elec', simpl='', - clusters='5', ll='copt', opts='Co2L-24H', - attr='p_nom', ext="pdf") + + snakemake = mock_snakemake( + "plot_network", + network="elec", + simpl="", + clusters="5", + ll="copt", + opts="Co2L-24H", + attr="p_nom", + ext="pdf", + ) configure_logging(snakemake) set_plot_style() - opts = snakemake.config['plotting'] - map_figsize = opts['map']['figsize'] - map_boundaries = opts['map']['boundaries'] + opts = snakemake.config["plotting"] + map_figsize = opts["map"]["figsize"] + map_boundaries = opts["map"]["boundaries"] - n = load_network_for_plots(snakemake.input.network, snakemake.input.tech_costs, 
snakemake.config) + n = load_network_for_plots( + snakemake.input.network, snakemake.input.tech_costs, snakemake.config + ) - scenario_opts = snakemake.wildcards.opts.split('-') + scenario_opts = snakemake.wildcards.opts.split("-") - fig, ax = plt.subplots(figsize=map_figsize, subplot_kw={"projection": ccrs.PlateCarree()}) + fig, ax = plt.subplots( + figsize=map_figsize, subplot_kw={"projection": ccrs.PlateCarree()} + ) plot_map(n, ax, snakemake.wildcards.attr, opts) - fig.savefig(snakemake.output.only_map, dpi=150, bbox_inches='tight') + fig.savefig(snakemake.output.only_map, dpi=150, bbox_inches="tight") ax1 = fig.add_axes([-0.115, 0.625, 0.2, 0.2]) plot_total_energy_pie(n, ax1) @@ -281,9 +400,12 @@ def split_costs(n): ll = snakemake.wildcards.ll ll_type = ll[0] ll_factor = ll[1:] - lbl = dict(c='line cost', v='line volume')[ll_type] - amnt = '{ll} x today\'s'.format(ll=ll_factor) if ll_factor != 'opt' else 'optimal' - fig.suptitle('Expansion to {amount} {label} at {clusters} clusters' - .format(amount=amnt, label=lbl, clusters=snakemake.wildcards.clusters)) - - fig.savefig(snakemake.output.ext, transparent=True, bbox_inches='tight') + lbl = dict(c="line cost", v="line volume")[ll_type] + amnt = "{ll} x today's".format(ll=ll_factor) if ll_factor != "opt" else "optimal" + fig.suptitle( + "Expansion to {amount} {label} at {clusters} clusters".format( + amount=amnt, label=lbl, clusters=snakemake.wildcards.clusters + ) + ) + + fig.savefig(snakemake.output.ext, transparent=True, bbox_inches="tight") diff --git a/scripts/plot_p_nom_max.py b/scripts/plot_p_nom_max.py index e79ad2741..948540770 100644 --- a/scripts/plot_p_nom_max.py +++ b/scripts/plot_p_nom_max.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -31,11 +32,13 @@ def cum_p_nom_max(net, tech, country=None): carrier_b = net.generators.carrier == tech - generators = pd.DataFrame(dict( - p_nom_max=net.generators.loc[carrier_b, 'p_nom_max'], - p_max_pu=net.generators_t.p_max_pu.loc[:,carrier_b].mean(), - country=net.generators.loc[carrier_b, 'bus'].map(net.buses.country) - )).sort_values("p_max_pu", ascending=False) + generators = pd.DataFrame( + dict( + p_nom_max=net.generators.loc[carrier_b, "p_nom_max"], + p_max_pu=net.generators_t.p_max_pu.loc[:, carrier_b].mean(), + country=net.generators.loc[carrier_b, "bus"].map(net.buses.country), + ) + ).sort_values("p_max_pu", ascending=False) if country is not None: generators = generators.loc[generators.country == country] @@ -46,22 +49,29 @@ def cum_p_nom_max(net, tech, country=None): if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('plot_p_nom_max', network='elec', simpl='', - techs='solar,onwind,offwind-dc', ext='png', - clusts= '5,full', country= 'all') + + snakemake = mock_snakemake( + "plot_p_nom_max", + network="elec", + simpl="", + techs="solar,onwind,offwind-dc", + ext="png", + clusts="5,full", + country="all", + ) configure_logging(snakemake) plot_kwds = dict(drawstyle="steps-post") - clusters = snakemake.wildcards.clusts.split(',') - techs = snakemake.wildcards.techs.split(',') + clusters = snakemake.wildcards.clusts.split(",") + techs = snakemake.wildcards.techs.split(",") country = snakemake.wildcards.country - if country == 'all': + if country == "all": country = None else: - plot_kwds['marker'] = 'x' + plot_kwds["marker"] = "x" fig, axes = plt.subplots(1, len(techs)) @@ -69,8 
+79,9 @@ def cum_p_nom_max(net, tech, country=None): net = pypsa.Network(snakemake.input[j]) for i, tech in enumerate(techs): - cum_p_nom_max(net, tech, country).plot(x="p_max_pu", y="cum_p_nom_max", - label=cluster, ax=axes[i], **plot_kwds) + cum_p_nom_max(net, tech, country).plot( + x="p_max_pu", y="cum_p_nom_max", label=cluster, ax=axes[i], **plot_kwds + ) for i, tech in enumerate(techs): ax = axes[i] @@ -79,4 +90,4 @@ def cum_p_nom_max(net, tech, country=None): plt.legend(title="Cluster level") - fig.savefig(snakemake.output[0], transparent=True, bbox_inches='tight') + fig.savefig(snakemake.output[0], transparent=True, bbox_inches="tight") diff --git a/scripts/plot_summary.py b/scripts/plot_summary.py index a34611de0..3cba60b6b 100644 --- a/scripts/plot_summary.py +++ b/scripts/plot_summary.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -52,22 +53,38 @@ def rename_techs(label): return label -preferred_order = pd.Index(["transmission lines","hydroelectricity","hydro reservoir","run of river","pumped hydro storage","onshore wind","offshore wind ac", "offshore wind dc","solar PV","solar thermal","OCGT","hydrogen storage","battery storage"]) +preferred_order = pd.Index( + [ + "transmission lines", + "hydroelectricity", + "hydro reservoir", + "run of river", + "pumped hydro storage", + "onshore wind", + "offshore wind ac", + "offshore wind dc", + "solar PV", + "solar thermal", + "OCGT", + "hydrogen storage", + "battery storage", + ] +) def plot_costs(infn, fn=None): ## For now ignore the simpl header - cost_df = pd.read_csv(infn,index_col=list(range(3)),header=[1,2,3]) + cost_df = pd.read_csv(infn, index_col=list(range(3)), header=[1, 2, 3]) df = cost_df.groupby(cost_df.index.get_level_values(2)).sum() - #convert to billions - df = df/1e9 + # convert to billions + df = df / 1e9 df = df.groupby(df.index.map(rename_techs)).sum() - to_drop = df.index[df.max(axis=1) < snakemake.config['plotting']['costs_threshold']] + to_drop = df.index[df.max(axis=1) < snakemake.config["plotting"]["costs_threshold"]] print("dropping") @@ -77,22 +94,28 @@ def plot_costs(infn, fn=None): print(df.sum()) - new_index = (preferred_order&df.index).append(df.index.difference(preferred_order)) + new_index = (preferred_order & df.index).append( + df.index.difference(preferred_order) + ) new_columns = df.sum().sort_values().index fig, ax = plt.subplots() - fig.set_size_inches((12,8)) + fig.set_size_inches((12, 8)) - df.loc[new_index,new_columns].T.plot(kind="bar",ax=ax,stacked=True,color=[snakemake.config['plotting']['tech_colors'][i] for i in new_index]) + df.loc[new_index, new_columns].T.plot( + kind="bar", + ax=ax, + stacked=True, + color=[snakemake.config["plotting"]["tech_colors"][i] for i in new_index], + ) - - handles,labels = ax.get_legend_handles_labels() + handles, labels = ax.get_legend_handles_labels() handles.reverse() labels.reverse() - ax.set_ylim([0,snakemake.config['plotting']['costs_max']]) + ax.set_ylim([0, snakemake.config["plotting"]["costs_max"]]) ax.set_ylabel("System Cost [EUR billion per year]") @@ -100,8 +123,7 @@ def plot_costs(infn, fn=None): ax.grid(axis="y") - ax.legend(handles,labels,ncol=4,loc="upper left") - + ax.legend(handles, labels, ncol=4, loc="upper left") fig.tight_layout() @@ -111,16 +133,18 @@ def plot_costs(infn, fn=None): def plot_energy(infn, fn=None): - energy_df = pd.read_csv(infn, index_col=list(range(2)),header=[1,2,3]) + energy_df = pd.read_csv(infn, index_col=list(range(2)), 
header=[1, 2, 3]) df = energy_df.groupby(energy_df.index.get_level_values(1)).sum() - #convert MWh to TWh - df = df/1e6 + # convert MWh to TWh + df = df / 1e6 df = df.groupby(df.index.map(rename_techs)).sum() - to_drop = df.index[df.abs().max(axis=1) < snakemake.config['plotting']['energy_threshold']] + to_drop = df.index[ + df.abs().max(axis=1) < snakemake.config["plotting"]["energy_threshold"] + ] print("dropping") @@ -130,22 +154,33 @@ def plot_energy(infn, fn=None): print(df.sum()) - new_index = (preferred_order&df.index).append(df.index.difference(preferred_order)) + new_index = (preferred_order & df.index).append( + df.index.difference(preferred_order) + ) new_columns = df.columns.sort_values() fig, ax = plt.subplots() - fig.set_size_inches((12,8)) - - df.loc[new_index,new_columns].T.plot(kind="bar",ax=ax,stacked=True,color=[snakemake.config['plotting']['tech_colors'][i] for i in new_index]) + fig.set_size_inches((12, 8)) + df.loc[new_index, new_columns].T.plot( + kind="bar", + ax=ax, + stacked=True, + color=[snakemake.config["plotting"]["tech_colors"][i] for i in new_index], + ) - handles,labels = ax.get_legend_handles_labels() + handles, labels = ax.get_legend_handles_labels() handles.reverse() labels.reverse() - ax.set_ylim([snakemake.config['plotting']['energy_min'],snakemake.config['plotting']['energy_max']]) + ax.set_ylim( + [ + snakemake.config["plotting"]["energy_min"], + snakemake.config["plotting"]["energy_max"], + ] + ) ax.set_ylabel("Energy [TWh/a]") @@ -153,8 +188,7 @@ def plot_energy(infn, fn=None): ax.grid(axis="y") - ax.legend(handles,labels,ncol=4,loc="upper left") - + ax.legend(handles, labels, ncol=4, loc="upper left") fig.tight_layout() @@ -163,11 +197,21 @@ def plot_energy(infn, fn=None): if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('plot_summary', summary='energy', network='elec', - simpl='', clusters=5, ll='copt', opts='Co2L-24H', - attr='', ext='png', country='all') + + snakemake = mock_snakemake( + "plot_summary", + summary="energy", + network="elec", + simpl="", + clusters=5, + ll="copt", + opts="Co2L-24H", + attr="", + ext="png", + country="all", + ) configure_logging(snakemake) summary = snakemake.wildcards.summary diff --git a/scripts/prepare_links_p_nom.py b/scripts/prepare_links_p_nom.py index b83089d65..a51b1abea 100644 --- a/scripts/prepare_links_p_nom.py +++ b/scripts/prepare_links_p_nom.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # @@ -49,29 +50,45 @@ def multiply(s): def extract_coordinates(s): - regex = (r"(\d{1,2})°(\d{1,2})′(\d{1,2})″(N|S) " - r"(\d{1,2})°(\d{1,2})′(\d{1,2})″(E|W)") + regex = ( + r"(\d{1,2})°(\d{1,2})′(\d{1,2})″(N|S) " r"(\d{1,2})°(\d{1,2})′(\d{1,2})″(E|W)" + ) e = s.str.extract(regex, expand=True) - lat = (e[0].astype(float) + (e[1].astype(float) + e[2].astype(float)/60.)/60.)*e[3].map({'N': +1., 'S': -1.}) - lon = (e[4].astype(float) + (e[5].astype(float) + e[6].astype(float)/60.)/60.)*e[7].map({'E': +1., 'W': -1.}) + lat = ( + e[0].astype(float) + (e[1].astype(float) + e[2].astype(float) / 60.0) / 60.0 + ) * e[3].map({"N": +1.0, "S": -1.0}) + lon = ( + e[4].astype(float) + (e[5].astype(float) + e[6].astype(float) / 60.0) / 60.0 + ) * e[7].map({"E": +1.0, "W": -1.0}) return lon, lat if __name__ == "__main__": - if 'snakemake' not in globals(): - from _helpers import mock_snakemake #rule must be enabled in config - snakemake = 
mock_snakemake('prepare_links_p_nom', simpl='', network='elec') + if "snakemake" not in globals(): + from _helpers import mock_snakemake # rule must be enabled in config + + snakemake = mock_snakemake("prepare_links_p_nom", simpl="", network="elec") configure_logging(snakemake) - links_p_nom = pd.read_html('https://en.wikipedia.org/wiki/List_of_HVDC_projects', header=0, match="SwePol")[0] + links_p_nom = pd.read_html( + "https://en.wikipedia.org/wiki/List_of_HVDC_projects", header=0, match="SwePol" + )[0] mw = "Power (MW)" - m_b = links_p_nom[mw].str.contains('x').fillna(False) - - links_p_nom.loc[m_b, mw] = links_p_nom.loc[m_b, mw].str.split('x').pipe(multiply) - links_p_nom[mw] = links_p_nom[mw].str.extract("[-/]?([\d.]+)", expand=False).astype(float) - - links_p_nom['x1'], links_p_nom['y1'] = extract_coordinates(links_p_nom['Converterstation 1']) - links_p_nom['x2'], links_p_nom['y2'] = extract_coordinates(links_p_nom['Converterstation 2']) - - links_p_nom.dropna(subset=['x1', 'y1', 'x2', 'y2']).to_csv(snakemake.output[0], index=False) + m_b = links_p_nom[mw].str.contains("x").fillna(False) + + links_p_nom.loc[m_b, mw] = links_p_nom.loc[m_b, mw].str.split("x").pipe(multiply) + links_p_nom[mw] = ( + links_p_nom[mw].str.extract("[-/]?([\d.]+)", expand=False).astype(float) + ) + + links_p_nom["x1"], links_p_nom["y1"] = extract_coordinates( + links_p_nom["Converterstation 1"] + ) + links_p_nom["x2"], links_p_nom["y2"] = extract_coordinates( + links_p_nom["Converterstation 2"] + ) + + links_p_nom.dropna(subset=["x1", "y1", "x2", "y2"]).to_csv( + snakemake.output[0], index=False + ) diff --git a/scripts/prepare_network.py b/scripts/prepare_network.py index ed33abb7e..e5ac4db14 100755 --- a/scripts/prepare_network.py +++ b/scripts/prepare_network.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -70,66 +71,86 @@ logger = logging.getLogger(__name__) -def add_co2limit(n, Nyears=1., factor=None): +def add_co2limit(n, Nyears=1.0, factor=None): if factor is not None: - annual_emissions = factor*snakemake.config['electricity']['co2base'] + annual_emissions = factor * snakemake.config["electricity"]["co2base"] else: - annual_emissions = snakemake.config['electricity']['co2limit'] + annual_emissions = snakemake.config["electricity"]["co2limit"] - n.add("GlobalConstraint", "CO2Limit", - carrier_attribute="co2_emissions", sense="<=", - constant=annual_emissions * Nyears) + n.add( + "GlobalConstraint", + "CO2Limit", + carrier_attribute="co2_emissions", + sense="<=", + constant=annual_emissions * Nyears, + ) def add_emission_prices(n, emission_prices=None, exclude_co2=False): if emission_prices is None: - emission_prices = snakemake.config['costs']['emission_prices'] - if exclude_co2: emission_prices.pop('co2') - ep = (pd.Series(emission_prices).rename(lambda x: x+'_emissions') * - n.carriers.filter(like='_emissions')).sum(axis=1) + emission_prices = snakemake.config["costs"]["emission_prices"] + if exclude_co2: + emission_prices.pop("co2") + ep = ( + pd.Series(emission_prices).rename(lambda x: x + "_emissions") + * n.carriers.filter(like="_emissions") + ).sum(axis=1) gen_ep = n.generators.carrier.map(ep) / n.generators.efficiency - n.generators['marginal_cost'] += gen_ep + n.generators["marginal_cost"] += gen_ep su_ep = n.storage_units.carrier.map(ep) / n.storage_units.efficiency_dispatch - n.storage_units['marginal_cost'] += su_ep + n.storage_units["marginal_cost"] += su_ep def set_line_s_max_pu(n): - s_max_pu = 
snakemake.config['lines']['s_max_pu'] - n.lines['s_max_pu'] = s_max_pu + s_max_pu = snakemake.config["lines"]["s_max_pu"] + n.lines["s_max_pu"] = s_max_pu logger.info(f"N-1 security margin of lines set to {s_max_pu}") def set_transmission_limit(n, ll_type, factor, Nyears=1): - links_dc_b = n.links.carrier == 'DC' if not n.links.empty else pd.Series() - - _lines_s_nom = (np.sqrt(3) * n.lines.type.map(n.line_types.i_nom) * - n.lines.num_parallel * n.lines.bus0.map(n.buses.v_nom)) - lines_s_nom = n.lines.s_nom.where(n.lines.type == '', _lines_s_nom) - - - col = 'capital_cost' if ll_type == 'c' else 'length' - ref = (lines_s_nom @ n.lines[col] + - n.links.loc[links_dc_b, "p_nom"] @ n.links.loc[links_dc_b, col]) - - costs = load_costs(Nyears, snakemake.input.tech_costs, - snakemake.config['costs'], - snakemake.config['electricity']) + links_dc_b = n.links.carrier == "DC" if not n.links.empty else pd.Series() + + _lines_s_nom = ( + np.sqrt(3) + * n.lines.type.map(n.line_types.i_nom) + * n.lines.num_parallel + * n.lines.bus0.map(n.buses.v_nom) + ) + lines_s_nom = n.lines.s_nom.where(n.lines.type == "", _lines_s_nom) + + col = "capital_cost" if ll_type == "c" else "length" + ref = ( + lines_s_nom @ n.lines[col] + + n.links.loc[links_dc_b, "p_nom"] @ n.links.loc[links_dc_b, col] + ) + + costs = load_costs( + Nyears, + snakemake.input.tech_costs, + snakemake.config["costs"], + snakemake.config["electricity"], + ) update_transmission_costs(n, costs, simple_hvdc_costs=False) - if factor == 'opt' or float(factor) > 1.0: - n.lines['s_nom_min'] = lines_s_nom - n.lines['s_nom_extendable'] = True + if factor == "opt" or float(factor) > 1.0: + n.lines["s_nom_min"] = lines_s_nom + n.lines["s_nom_extendable"] = True - n.links.loc[links_dc_b, 'p_nom_min'] = n.links.loc[links_dc_b, 'p_nom'] - n.links.loc[links_dc_b, 'p_nom_extendable'] = True + n.links.loc[links_dc_b, "p_nom_min"] = n.links.loc[links_dc_b, "p_nom"] + n.links.loc[links_dc_b, "p_nom_extendable"] = True - if factor != 'opt': - con_type = 'expansion_cost' if ll_type == 'c' else 'volume_expansion' + if factor != "opt": + con_type = "expansion_cost" if ll_type == "c" else "volume_expansion" rhs = float(factor) * ref - n.add('GlobalConstraint', f'l{ll_type}_limit', - type=f'transmission_{con_type}_limit', - sense='<=', constant=rhs, carrier_attribute='AC, DC') + n.add( + "GlobalConstraint", + f"l{ll_type}_limit", + type=f"transmission_{con_type}_limit", + sense="<=", + constant=rhs, + carrier_attribute="AC, DC", + ) return n @@ -143,7 +164,7 @@ def average_every_nhours(n, offset): m.snapshot_weightings = snapshot_weightings for c in n.iterate_components(): - pnl = getattr(m, c.list_name+"_t") + pnl = getattr(m, c.list_name + "_t") for k, df in c.pnl.items(): if not df.empty: pnl[k] = df.resample(offset).mean() @@ -156,15 +177,16 @@ def apply_time_segmentation(n, segments): try: import tsam.timeseriesaggregation as tsam except: - raise ModuleNotFoundError("Optional dependency 'tsam' not found." - "Install via 'pip install tsam'") + raise ModuleNotFoundError( + "Optional dependency 'tsam' not found." 
"Install via 'pip install tsam'" + ) p_max_pu_norm = n.generators_t.p_max_pu.max() p_max_pu = n.generators_t.p_max_pu / p_max_pu_norm load_norm = n.loads_t.p_set.max() load = n.loads_t.p_set / load_norm - + inflow_norm = n.storage_units_t.inflow.max() inflow = n.storage_units_t.inflow / inflow_norm @@ -172,9 +194,14 @@ def apply_time_segmentation(n, segments): solver_name = snakemake.config["solving"]["solver"]["name"] - agg = tsam.TimeSeriesAggregation(raw, hoursPerPeriod=len(raw), - noTypicalPeriods=1, noSegments=int(segments), - segmentation=True, solver=solver_name) + agg = tsam.TimeSeriesAggregation( + raw, + hoursPerPeriod=len(raw), + noTypicalPeriods=1, + noSegments=int(segments), + segmentation=True, + solver=solver_name, + ) segmented = agg.createTypicalPeriods() @@ -182,9 +209,11 @@ def apply_time_segmentation(n, segments): offsets = np.insert(np.cumsum(weightings[:-1]), 0, 0) snapshots = [n.snapshots[0] + pd.Timedelta(f"{offset}h") for offset in offsets] - n.set_snapshots(pd.DatetimeIndex(snapshots, name='name')) - n.snapshot_weightings = pd.Series(weightings, index=snapshots, name="weightings", dtype="float64") - + n.set_snapshots(pd.DatetimeIndex(snapshots, name="name")) + n.snapshot_weightings = pd.Series( + weightings, index=snapshots, name="weightings", dtype="float64" + ) + segmented.index = snapshots n.generators_t.p_max_pu = segmented[n.generators_t.p_max_pu.columns] * p_max_pu_norm n.loads_t.p_set = segmented[n.loads_t.p_set.columns] * load_norm @@ -192,50 +221,58 @@ def apply_time_segmentation(n, segments): return n + def enforce_autarky(n, only_crossborder=False): if only_crossborder: lines_rm = n.lines.loc[ - n.lines.bus0.map(n.buses.country) != - n.lines.bus1.map(n.buses.country) - ].index + n.lines.bus0.map(n.buses.country) != n.lines.bus1.map(n.buses.country) + ].index links_rm = n.links.loc[ - n.links.bus0.map(n.buses.country) != - n.links.bus1.map(n.buses.country) - ].index + n.links.bus0.map(n.buses.country) != n.links.bus1.map(n.buses.country) + ].index else: lines_rm = n.lines.index - links_rm = n.links.loc[n.links.carrier=="DC"].index + links_rm = n.links.loc[n.links.carrier == "DC"].index n.mremove("Line", lines_rm) n.mremove("Link", links_rm) + def set_line_nom_max(n): s_nom_max_set = snakemake.config["lines"].get("s_nom_max,", np.inf) p_nom_max_set = snakemake.config["links"].get("p_nom_max", np.inf) n.lines.s_nom_max.clip(upper=s_nom_max_set, inplace=True) n.links.p_nom_max.clip(upper=p_nom_max_set, inplace=True) + if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('prepare_network', network='elec', simpl='', - clusters='40', ll='v0.3', opts='Co2L-24H') + + snakemake = mock_snakemake( + "prepare_network", + network="elec", + simpl="", + clusters="40", + ll="v0.3", + opts="Co2L-24H", + ) configure_logging(snakemake) - opts = snakemake.wildcards.opts.split('-') + opts = snakemake.wildcards.opts.split("-") n = pypsa.Network(snakemake.input[0]) - Nyears = n.snapshot_weightings.objective.sum() / 8760. 
+ Nyears = n.snapshot_weightings.objective.sum() / 8760.0 set_line_s_max_pu(n) for o in opts: - m = re.match(r'^\d+h$', o, re.IGNORECASE) + m = re.match(r"^\d+h$", o, re.IGNORECASE) if m is not None: n = average_every_nhours(n, m.group(0)) break for o in opts: - m = re.match(r'^\d+seg$', o, re.IGNORECASE) + m = re.match(r"^\d+seg$", o, re.IGNORECASE) if m is not None: n = apply_time_segmentation(n, m.group(0)[:-3]) break @@ -264,9 +301,9 @@ def set_line_nom_max(n): comps = {"Generator", "Link", "StorageUnit", "Store"} for c in n.iterate_components(comps): sel = c.df.carrier.str.contains(carrier) - c.df.loc[sel,attr] *= factor + c.df.loc[sel, attr] *= factor - if 'Ep' in opts: + if "Ep" in opts: add_emission_prices(n) ll_type, factor = snakemake.wildcards.ll[0], snakemake.wildcards.ll[1:] diff --git a/scripts/retrieve_databundle.py b/scripts/retrieve_databundle.py index 868698796..2272b730c 100644 --- a/scripts/retrieve_databundle.py +++ b/scripts/retrieve_databundle.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright 2019-2020 Fabian Hofmann (FIAS) # SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # @@ -42,15 +43,18 @@ if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('retrieve_databundle') - rootpath = '..' + + snakemake = mock_snakemake("retrieve_databundle") + rootpath = ".." else: - rootpath = '.' - configure_logging(snakemake) # TODO Make logging compatible with progressbar (see PR #102) + rootpath = "." + configure_logging( + snakemake + ) # TODO Make logging compatible with progressbar (see PR #102) - if snakemake.config['tutorial']: + if snakemake.config["tutorial"]: url = "https://zenodo.org/record/3517921/files/pypsa-eur-tutorial-data-bundle.tar.xz" else: url = "https://zenodo.org/record/3517935/files/pypsa-eur-data-bundle.tar.xz" diff --git a/scripts/simplify_network.py b/scripts/simplify_network.py index 85bc4d15c..4a11ccb4f 100644 --- a/scripts/simplify_network.py +++ b/scripts/simplify_network.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -97,7 +98,13 @@ import pypsa from pypsa.io import import_components_from_dataframe, import_series_from_dataframe -from pypsa.networkclustering import busmap_by_stubs, aggregategenerators, aggregateoneport, get_clustering_from_busmap, _make_consense +from pypsa.networkclustering import ( + busmap_by_stubs, + aggregategenerators, + aggregateoneport, + get_clustering_from_busmap, + _make_consense, +) logger = logging.getLogger(__name__) @@ -106,30 +113,34 @@ def simplify_network_to_380(n): ## All goes to v_nom == 380 logger.info("Mapping all network lines onto a single 380kV layer") - n.buses['v_nom'] = 380. - - linetype_380, = n.lines.loc[n.lines.v_nom == 380., 'type'].unique() - lines_v_nom_b = n.lines.v_nom != 380. - n.lines.loc[lines_v_nom_b, 'num_parallel'] *= (n.lines.loc[lines_v_nom_b, 'v_nom'] / 380.)**2 - n.lines.loc[lines_v_nom_b, 'v_nom'] = 380. 
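simplify_network_to_380 maps every AC line onto a single 380 kV layer: num_parallel is rescaled by (v_nom / 380)^2 before the voltage is overwritten, which roughly preserves the per-unit impedance of the original line once the standard 380 kV line type is applied. A small numeric sketch of that rescaling with made-up line data:

import pandas as pd

# hypothetical lines at different voltage levels
lines = pd.DataFrame({"v_nom": [220.0, 380.0], "num_parallel": [2.0, 1.0]})

lines_v_nom_b = lines.v_nom != 380.0
lines.loc[lines_v_nom_b, "num_parallel"] *= (
    lines.loc[lines_v_nom_b, "v_nom"] / 380.0
) ** 2
lines.loc[lines_v_nom_b, "v_nom"] = 380.0
print(lines)  # the 220 kV line becomes a 380 kV line with ~0.67 parallel circuits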
- n.lines.loc[lines_v_nom_b, 'type'] = linetype_380 - n.lines.loc[lines_v_nom_b, 's_nom'] = ( - np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) * - n.lines.bus0.map(n.buses.v_nom) * n.lines.num_parallel + n.buses["v_nom"] = 380.0 + + (linetype_380,) = n.lines.loc[n.lines.v_nom == 380.0, "type"].unique() + lines_v_nom_b = n.lines.v_nom != 380.0 + n.lines.loc[lines_v_nom_b, "num_parallel"] *= ( + n.lines.loc[lines_v_nom_b, "v_nom"] / 380.0 + ) ** 2 + n.lines.loc[lines_v_nom_b, "v_nom"] = 380.0 + n.lines.loc[lines_v_nom_b, "type"] = linetype_380 + n.lines.loc[lines_v_nom_b, "s_nom"] = ( + np.sqrt(3) + * n.lines["type"].map(n.line_types.i_nom) + * n.lines.bus0.map(n.buses.v_nom) + * n.lines.num_parallel ) # Replace transformers by lines trafo_map = pd.Series(n.transformers.bus1.values, index=n.transformers.bus0.values) - trafo_map = trafo_map[~trafo_map.index.duplicated(keep='first')] + trafo_map = trafo_map[~trafo_map.index.duplicated(keep="first")] several_trafo_b = trafo_map.isin(trafo_map.index) trafo_map.loc[several_trafo_b] = trafo_map.loc[several_trafo_b].map(trafo_map) missing_buses_i = n.buses.index.difference(trafo_map.index) trafo_map = trafo_map.append(pd.Series(missing_buses_i, missing_buses_i)) - for c in n.one_port_components|n.branch_components: + for c in n.one_port_components | n.branch_components: df = n.df(c) for col in df.columns: - if col.startswith('bus'): + if col.startswith("bus"): df[col] = df[col].map(trafo_map) n.mremove("Transformer", n.transformers.index) @@ -139,26 +150,38 @@ def simplify_network_to_380(n): def _prepare_connection_costs_per_link(n): - if n.links.empty: return {} + if n.links.empty: + return {} Nyears = n.snapshot_weightings.objective.sum() / 8760 - costs = load_costs(Nyears, snakemake.input.tech_costs, - snakemake.config['costs'], snakemake.config['electricity']) + costs = load_costs( + Nyears, + snakemake.input.tech_costs, + snakemake.config["costs"], + snakemake.config["electricity"], + ) connection_costs_per_link = {} - for tech in snakemake.config['renewable']: - if tech.startswith('offwind'): + for tech in snakemake.config["renewable"]: + if tech.startswith("offwind"): connection_costs_per_link[tech] = ( - n.links.length * snakemake.config['lines']['length_factor'] * - (n.links.underwater_fraction * costs.at[tech + '-connection-submarine', 'capital_cost'] + - (1. 
- n.links.underwater_fraction) * costs.at[tech + '-connection-underground', 'capital_cost']) + n.links.length + * snakemake.config["lines"]["length_factor"] + * ( + n.links.underwater_fraction + * costs.at[tech + "-connection-submarine", "capital_cost"] + + (1.0 - n.links.underwater_fraction) + * costs.at[tech + "-connection-underground", "capital_cost"] + ) ) return connection_costs_per_link -def _compute_connection_costs_to_bus(n, busmap, connection_costs_per_link=None, buses=None): +def _compute_connection_costs_to_bus( + n, busmap, connection_costs_per_link=None, buses=None +): if connection_costs_per_link is None: connection_costs_per_link = _prepare_connection_costs_per_link(n) @@ -168,12 +191,21 @@ def _compute_connection_costs_to_bus(n, busmap, connection_costs_per_link=None, connection_costs_to_bus = pd.DataFrame(index=buses) for tech in connection_costs_per_link: - adj = n.adjacency_matrix(weights=pd.concat(dict(Link=connection_costs_per_link[tech].reindex(n.links.index), - Line=pd.Series(0., n.lines.index)))) + adj = n.adjacency_matrix( + weights=pd.concat( + dict( + Link=connection_costs_per_link[tech].reindex(n.links.index), + Line=pd.Series(0.0, n.lines.index), + ) + ) + ) - costs_between_buses = dijkstra(adj, directed=False, indices=n.buses.index.get_indexer(buses)) - connection_costs_to_bus[tech] = costs_between_buses[np.arange(len(buses)), - n.buses.index.get_indexer(busmap.loc[buses])] + costs_between_buses = dijkstra( + adj, directed=False, indices=n.buses.index.get_indexer(buses) + ) + connection_costs_to_bus[tech] = costs_between_buses[ + np.arange(len(buses)), n.buses.index.get_indexer(busmap.loc[buses]) + ] return connection_costs_to_bus @@ -182,17 +214,29 @@ def _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus): connection_costs = {} for tech in connection_costs_to_bus: tech_b = n.generators.carrier == tech - costs = n.generators.loc[tech_b, "bus"].map(connection_costs_to_bus[tech]).loc[lambda s: s>0] + costs = ( + n.generators.loc[tech_b, "bus"] + .map(connection_costs_to_bus[tech]) + .loc[lambda s: s > 0] + ) if not costs.empty: n.generators.loc[costs.index, "capital_cost"] += costs - logger.info("Displacing {} generator(s) and adding connection costs to capital_costs: {} " - .format(tech, ", ".join("{:.0f} Eur/MW/a for `{}`".format(d, b) for b, d in costs.iteritems()))) + logger.info( + "Displacing {} generator(s) and adding connection costs to capital_costs: {} ".format( + tech, + ", ".join( + "{:.0f} Eur/MW/a for `{}`".format(d, b) + for b, d in costs.iteritems() + ), + ) + ) connection_costs[tech] = costs - pd.DataFrame(connection_costs).to_csv(snakemake.output.connection_costs) - + pd.DataFrame(connection_costs).to_csv(snakemake.output.connection_costs) -def _aggregate_and_move_components(n, busmap, connection_costs_to_bus, aggregate_one_ports={"Load", "StorageUnit"}): +def _aggregate_and_move_components( + n, busmap, connection_costs_to_bus, aggregate_one_ports={"Load", "StorageUnit"} +): def replace_components(n, c, df, pnl): n.mremove(c, n.df(c).index) @@ -203,7 +247,9 @@ def replace_components(n, c, df, pnl): _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus) - generators, generators_pnl = aggregategenerators(n, busmap, custom_strategies={'p_nom_min': np.sum}) + generators, generators_pnl = aggregategenerators( + n, busmap, custom_strategies={"p_nom_min": np.sum} + ) replace_components(n, "Generator", generators, generators_pnl) for one_port in aggregate_one_ports: @@ -225,8 +271,10 @@ def simplify_links(n): 
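_compute_connection_costs_to_bus above puts the per-link connection costs on the graph edges (lines at zero cost) and uses scipy's dijkstra to read off, for every aggregated bus, the cheapest path cost to the bus it is mapped to. A minimal sketch of that shortest-path pattern on a hand-made three-node graph:

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import dijkstra

# hypothetical connection costs on the edges 0-1 and 1-2
adj = csr_matrix(np.array([
    [0.0, 5.0, 0.0],
    [5.0, 0.0, 2.0],
    [0.0, 2.0, 0.0],
]))

# cheapest cost from node 0 to every other node
dist = dijkstra(adj, directed=False, indices=[0])
print(dist)  # [[0. 5. 7.]]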
return n, n.buses.index.to_series() # Determine connected link components, ignore all links but DC - adjacency_matrix = n.adjacency_matrix(branch_components=['Link'], - weights=dict(Link=(n.links.carrier == 'DC').astype(float))) + adjacency_matrix = n.adjacency_matrix( + branch_components=["Link"], + weights=dict(Link=(n.links.carrier == "DC").astype(float)), + ) _, labels = connected_components(adjacency_matrix, directed=False) labels = pd.Series(labels, n.buses.index) @@ -237,22 +285,23 @@ def split_links(nodes): nodes = frozenset(nodes) seen = set() - supernodes = {m for m in nodes - if len(G.adj[m]) > 2 or (set(G.adj[m]) - nodes)} + supernodes = {m for m in nodes if len(G.adj[m]) > 2 or (set(G.adj[m]) - nodes)} for u in supernodes: for m, ls in G.adj[u].items(): - if m not in nodes or m in seen: continue + if m not in nodes or m in seen: + continue buses = [u, m] - links = [list(ls)] #[name for name in ls]] + links = [list(ls)] # [name for name in ls]] while m not in (supernodes | seen): seen.add(m) for m2, ls in G.adj[m].items(): - if m2 in seen or m2 == u: continue + if m2 in seen or m2 == u: + continue buses.append(m2) - links.append(list(ls)) # [name for name in ls]) + links.append(list(ls)) # [name for name in ls]) break else: # stub @@ -265,44 +314,62 @@ def split_links(nodes): busmap = n.buses.index.to_series() connection_costs_per_link = _prepare_connection_costs_per_link(n) - connection_costs_to_bus = pd.DataFrame(0., index=n.buses.index, columns=list(connection_costs_per_link)) + connection_costs_to_bus = pd.DataFrame( + 0.0, index=n.buses.index, columns=list(connection_costs_per_link) + ) for lbl in labels.value_counts().loc[lambda s: s > 2].index: for b, buses, links in split_links(labels.index[labels == lbl]): - if len(buses) <= 2: continue + if len(buses) <= 2: + continue - logger.debug('nodes = {}'.format(labels.index[labels == lbl])) - logger.debug('b = {}\nbuses = {}\nlinks = {}'.format(b, buses, links)) + logger.debug("nodes = {}".format(labels.index[labels == lbl])) + logger.debug("b = {}\nbuses = {}\nlinks = {}".format(b, buses, links)) - m = sp.spatial.distance_matrix(n.buses.loc[b, ['x', 'y']], - n.buses.loc[buses[1:-1], ['x', 'y']]) + m = sp.spatial.distance_matrix( + n.buses.loc[b, ["x", "y"]], n.buses.loc[buses[1:-1], ["x", "y"]] + ) busmap.loc[buses] = b[np.r_[0, m.argmin(axis=0), 1]] - connection_costs_to_bus.loc[buses] += _compute_connection_costs_to_bus(n, busmap, connection_costs_per_link, buses) + connection_costs_to_bus.loc[buses] += _compute_connection_costs_to_bus( + n, busmap, connection_costs_per_link, buses + ) all_links = [i for _, i in sum(links, [])] - p_max_pu = snakemake.config['links'].get('p_max_pu', 1.) 
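simplify_links above first labels the groups of buses that are connected through DC links with scipy's connected_components, and then walks each group to collapse chains of converter buses into one equivalent link. A small sketch of the labelling step on a hypothetical adjacency matrix:

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components

# hypothetical adjacency: buses 0 and 1 joined by a DC link, bus 2 isolated
adj = csr_matrix(np.array([
    [0, 1, 0],
    [1, 0, 0],
    [0, 0, 0],
]))

n_components, labels = connected_components(adj, directed=False)
print(n_components, labels)  # 2 [0 0 1]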
- lengths = n.links.loc[all_links, 'length'] - name = lengths.idxmax() + '+{}'.format(len(links) - 1) + p_max_pu = snakemake.config["links"].get("p_max_pu", 1.0) + lengths = n.links.loc[all_links, "length"] + name = lengths.idxmax() + "+{}".format(len(links) - 1) params = dict( - carrier='DC', - bus0=b[0], bus1=b[1], - length=sum(n.links.loc[[i for _, i in l], 'length'].mean() for l in links), - p_nom=min(n.links.loc[[i for _, i in l], 'p_nom'].sum() for l in links), - underwater_fraction=sum(lengths/lengths.sum() * n.links.loc[all_links, 'underwater_fraction']), + carrier="DC", + bus0=b[0], + bus1=b[1], + length=sum( + n.links.loc[[i for _, i in l], "length"].mean() for l in links + ), + p_nom=min(n.links.loc[[i for _, i in l], "p_nom"].sum() for l in links), + underwater_fraction=sum( + lengths + / lengths.sum() + * n.links.loc[all_links, "underwater_fraction"] + ), p_max_pu=p_max_pu, p_min_pu=-p_max_pu, underground=False, - under_construction=False + under_construction=False, ) - logger.info("Joining the links {} connecting the buses {} to simple link {}".format(", ".join(all_links), ", ".join(buses), name)) + logger.info( + "Joining the links {} connecting the buses {} to simple link {}".format( + ", ".join(all_links), ", ".join(buses), name + ) + ) n.mremove("Link", all_links) static_attrs = n.components["Link"]["attrs"].loc[lambda df: df.static] - for attr, default in static_attrs.default.iteritems(): params.setdefault(attr, default) + for attr, default in static_attrs.default.iteritems(): + params.setdefault(attr, default) n.links.loc[name] = pd.Series(params) # n.add("Link", **params) @@ -312,10 +379,11 @@ def split_links(nodes): _aggregate_and_move_components(n, busmap, connection_costs_to_bus) return n, busmap + def remove_stubs(n): logger.info("Removing stubs") - busmap = busmap_by_stubs(n) # ['country']) + busmap = busmap_by_stubs(n) # ['country']) connection_costs_to_bus = _compute_connection_costs_to_bus(n, busmap) @@ -323,23 +391,34 @@ def remove_stubs(n): return n, busmap + def aggregate_to_substations(n, buses_i=None): # can be used to aggregate a selection of buses to electrically closest neighbors # if no buses are given, nodes that are no substations or without offshore connection are aggregated - + if buses_i is None: - logger.info("Aggregating buses that are no substations or have no valid offshore connection") - buses_i = list(set(n.buses.index)-set(n.generators.bus)-set(n.loads.bus)) + logger.info( + "Aggregating buses that are no substations or have no valid offshore connection" + ) + buses_i = list(set(n.buses.index) - set(n.generators.bus) - set(n.loads.bus)) - weight = pd.concat({'Line': n.lines.length/n.lines.s_nom.clip(1e-3), - 'Link': n.links.length/n.links.p_nom.clip(1e-3)}) + weight = pd.concat( + { + "Line": n.lines.length / n.lines.s_nom.clip(1e-3), + "Link": n.links.length / n.links.p_nom.clip(1e-3), + } + ) - adj = n.adjacency_matrix(branch_components=['Line', 'Link'], weights=weight) + adj = n.adjacency_matrix(branch_components=["Line", "Link"], weights=weight) bus_indexer = n.buses.index.get_indexer(buses_i) - dist = pd.DataFrame(dijkstra(adj, directed=False, indices=bus_indexer), buses_i, n.buses.index) + dist = pd.DataFrame( + dijkstra(adj, directed=False, indices=bus_indexer), buses_i, n.buses.index + ) - dist[buses_i] = np.inf # bus in buses_i should not be assigned to different bus in buses_i + dist[ + buses_i + ] = np.inf # bus in buses_i should not be assigned to different bus in buses_i for c in n.buses.country.unique(): incountry_b = 
n.buses.country == c @@ -348,46 +427,70 @@ def aggregate_to_substations(n, buses_i=None): busmap = n.buses.index.to_series() busmap.loc[buses_i] = dist.idxmin(1) - clustering = get_clustering_from_busmap(n, busmap, - bus_strategies=dict(country=_make_consense("Bus", "country")), - aggregate_generators_weighted=True, - aggregate_generators_carriers=None, - aggregate_one_ports=["Load", "StorageUnit"], - line_length_factor=1.0, - generator_strategies={'p_nom_max': 'sum'}, - scale_link_capital_costs=False) - + clustering = get_clustering_from_busmap( + n, + busmap, + bus_strategies=dict(country=_make_consense("Bus", "country")), + aggregate_generators_weighted=True, + aggregate_generators_carriers=None, + aggregate_one_ports=["Load", "StorageUnit"], + line_length_factor=1.0, + generator_strategies={"p_nom_max": "sum"}, + scale_link_capital_costs=False, + ) + return clustering.network, busmap def cluster(n, n_clusters): logger.info(f"Clustering to {n_clusters} buses") - focus_weights = snakemake.config.get('focus_weights', None) - - renewable_carriers = pd.Index([tech - for tech in n.generators.carrier.unique() - if tech.split('-', 2)[0] in snakemake.config['renewable']]) + focus_weights = snakemake.config.get("focus_weights", None) + + renewable_carriers = pd.Index( + [ + tech + for tech in n.generators.carrier.unique() + if tech.split("-", 2)[0] in snakemake.config["renewable"] + ] + ) + def consense(x): v = x.iat[0] - assert ((x == v).all() or x.isnull().all()), ( - "The `potential` configuration option must agree for all renewable carriers, for now!" - ) + assert ( + x == v + ).all() or x.isnull().all(), "The `potential` configuration option must agree for all renewable carriers, for now!" return v - potential_mode = (consense(pd.Series([snakemake.config['renewable'][tech]['potential'] - for tech in renewable_carriers])) - if len(renewable_carriers) > 0 else 'conservative') - clustering = clustering_for_n_clusters(n, n_clusters, custom_busmap=False, potential_mode=potential_mode, - solver_name=snakemake.config['solving']['solver']['name'], - focus_weights=focus_weights) + + potential_mode = ( + consense( + pd.Series( + [ + snakemake.config["renewable"][tech]["potential"] + for tech in renewable_carriers + ] + ) + ) + if len(renewable_carriers) > 0 + else "conservative" + ) + clustering = clustering_for_n_clusters( + n, + n_clusters, + custom_busmap=False, + potential_mode=potential_mode, + solver_name=snakemake.config["solving"]["solver"]["name"], + focus_weights=focus_weights, + ) return clustering.network, clustering.busmap if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('simplify_network', simpl='', network='elec') + + snakemake = mock_snakemake("simplify_network", simpl="", network="elec") configure_logging(snakemake) n = pypsa.Network(snakemake.input.network) @@ -400,7 +503,11 @@ def consense(x): busmaps = [trafo_map, simplify_links_map, stub_map] - if snakemake.config.get('clustering', {}).get('simplify', {}).get('to_substations', False): + if ( + snakemake.config.get("clustering", {}) + .get("simplify", {}) + .get("to_substations", False) + ): n, substation_map = aggregate_to_substations(n) busmaps.append(substation_map) @@ -410,11 +517,17 @@ def consense(x): # some entries in n.buses are not updated in previous functions, therefore can be wrong. 
as they are not needed # and are lost when clustering (for example with the simpl wildcard), we remove them for consistency: - buses_c = {'symbol', 'tags', 'under_construction', 'substation_lv', 'substation_off'}.intersection(n.buses.columns) + buses_c = { + "symbol", + "tags", + "under_construction", + "substation_lv", + "substation_off", + }.intersection(n.buses.columns) n.buses = n.buses.drop(buses_c, axis=1) update_p_nom_max(n) - + n.export_to_netcdf(snakemake.output.network) busmap_s = reduce(lambda x, y: x.map(y), busmaps[1:], busmaps[0]) diff --git a/scripts/solve_network.py b/scripts/solve_network.py index 6619f2d7f..22da63c46 100755 --- a/scripts/solve_network.py +++ b/scripts/solve_network.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -84,8 +85,14 @@ import re import pypsa -from pypsa.linopf import (get_var, define_constraints, linexpr, join_exprs, - network_lopf, ilopf) +from pypsa.linopf import ( + get_var, + define_constraints, + linexpr, + join_exprs, + network_lopf, + ilopf, +) from pathlib import Path from vresutils.benchmark import memory_logger @@ -95,79 +102,94 @@ def prepare_network(n, solve_opts): - if 'clip_p_max_pu' in solve_opts: + if "clip_p_max_pu" in solve_opts: for df in (n.generators_t.p_max_pu, n.storage_units_t.inflow): - df.where(df>solve_opts['clip_p_max_pu'], other=0., inplace=True) + df.where(df > solve_opts["clip_p_max_pu"], other=0.0, inplace=True) - if solve_opts.get('load_shedding'): + if solve_opts.get("load_shedding"): n.add("Carrier", "Load") buses_i = n.buses.query("carrier == 'AC'").index - n.madd("Generator", buses_i, " load", - bus=buses_i, - carrier='load', - sign=1e-3, # Adjust sign to measure p and p_nom in kW instead of MW - marginal_cost=1e2, # Eur/kWh - # intersect between macroeconomic and surveybased - # willingness to pay - # http://journal.frontiersin.org/article/10.3389/fenrg.2015.00055/full - p_nom=1e9 # kW - ) - - if solve_opts.get('noisy_costs'): + n.madd( + "Generator", + buses_i, + " load", + bus=buses_i, + carrier="load", + sign=1e-3, # Adjust sign to measure p and p_nom in kW instead of MW + marginal_cost=1e2, # Eur/kWh + # intersect between macroeconomic and surveybased + # willingness to pay + # http://journal.frontiersin.org/article/10.3389/fenrg.2015.00055/full + p_nom=1e9, # kW + ) + + if solve_opts.get("noisy_costs"): for t in n.iterate_components(n.one_port_components): - #if 'capital_cost' in t.df: + # if 'capital_cost' in t.df: # t.df['capital_cost'] += 1e1 + 2.*(np.random.random(len(t.df)) - 0.5) - if 'marginal_cost' in t.df: - t.df['marginal_cost'] += (1e-2 + 2e-3 * - (np.random.random(len(t.df)) - 0.5)) - - for t in n.iterate_components(['Line', 'Link']): - t.df['capital_cost'] += (1e-1 + - 2e-2*(np.random.random(len(t.df)) - 0.5)) * t.df['length'] - - if solve_opts.get('nhours'): - nhours = solve_opts['nhours'] + if "marginal_cost" in t.df: + t.df["marginal_cost"] += 1e-2 + 2e-3 * ( + np.random.random(len(t.df)) - 0.5 + ) + + for t in n.iterate_components(["Line", "Link"]): + t.df["capital_cost"] += ( + 1e-1 + 2e-2 * (np.random.random(len(t.df)) - 0.5) + ) * t.df["length"] + + if solve_opts.get("nhours"): + nhours = solve_opts["nhours"] n.set_snapshots(n.snapshots[:nhours]) - n.snapshot_weightings[:] = 8760. 
/ nhours + n.snapshot_weightings[:] = 8760.0 / nhours return n def add_CCL_constraints(n, config): - agg_p_nom_limits = config['electricity'].get('agg_p_nom_limits') + agg_p_nom_limits = config["electricity"].get("agg_p_nom_limits") try: - agg_p_nom_minmax = pd.read_csv(agg_p_nom_limits, - index_col=list(range(2))) + agg_p_nom_minmax = pd.read_csv(agg_p_nom_limits, index_col=list(range(2))) except IOError: - logger.exception("Need to specify the path to a .csv file containing " - "aggregate capacity limits per country in " - "config['electricity']['agg_p_nom_limit'].") - logger.info("Adding per carrier generation capacity constraints for " - "individual countries") + logger.exception( + "Need to specify the path to a .csv file containing " + "aggregate capacity limits per country in " + "config['electricity']['agg_p_nom_limit']." + ) + logger.info( + "Adding per carrier generation capacity constraints for " "individual countries" + ) gen_country = n.generators.bus.map(n.buses.country) # cc means country and carrier - p_nom_per_cc = (pd.DataFrame( - {'p_nom': linexpr((1, get_var(n, 'Generator', 'p_nom'))), - 'country': gen_country, 'carrier': n.generators.carrier}) - .dropna(subset=['p_nom']) - .groupby(['country', 'carrier']).p_nom - .apply(join_exprs)) - minimum = agg_p_nom_minmax['min'].dropna() + p_nom_per_cc = ( + pd.DataFrame( + { + "p_nom": linexpr((1, get_var(n, "Generator", "p_nom"))), + "country": gen_country, + "carrier": n.generators.carrier, + } + ) + .dropna(subset=["p_nom"]) + .groupby(["country", "carrier"]) + .p_nom.apply(join_exprs) + ) + minimum = agg_p_nom_minmax["min"].dropna() if not minimum.empty: - minconstraint = define_constraints(n, p_nom_per_cc[minimum.index], - '>=', minimum, 'agg_p_nom', 'min') - maximum = agg_p_nom_minmax['max'].dropna() + minconstraint = define_constraints( + n, p_nom_per_cc[minimum.index], ">=", minimum, "agg_p_nom", "min" + ) + maximum = agg_p_nom_minmax["max"].dropna() if not maximum.empty: - maxconstraint = define_constraints(n, p_nom_per_cc[maximum.index], - '<=', maximum, 'agg_p_nom', 'max') + maxconstraint = define_constraints( + n, p_nom_per_cc[maximum.index], "<=", maximum, "agg_p_nom", "max" + ) def add_EQ_constraints(n, o, scaling=1e-1): float_regex = "[0-9]*\.?[0-9]+" level = float(re.findall(float_regex, o)[0]) - if o[-1] == 'c': + if o[-1] == "c": ggrouper = n.generators.bus.map(n.buses.country) lgrouper = n.loads.bus.map(n.buses.country) sgrouper = n.storage_units.bus.map(n.buses.country) @@ -175,51 +197,75 @@ def add_EQ_constraints(n, o, scaling=1e-1): ggrouper = n.generators.bus lgrouper = n.loads.bus sgrouper = n.storage_units.bus - load = n.snapshot_weightings.generators @ \ - n.loads_t.p_set.groupby(lgrouper, axis=1).sum() - inflow = n.snapshot_weightings.stores @ \ - n.storage_units_t.inflow.groupby(sgrouper, axis=1).sum() - inflow = inflow.reindex(load.index).fillna(0.) 
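prepare_network above can add tiny random perturbations to marginal and capital costs (the noisy_costs option) so that otherwise identical assets do not lead to degenerate, solver-dependent optima. A standalone sketch of the perturbation with a hypothetical cost series:

import numpy as np
import pandas as pd

# hypothetical identical marginal costs for three generators
marginal_cost = pd.Series(10.0, index=["gen0", "gen1", "gen2"])

# small random offset in the same range as used above (1e-2 +/- 1e-3)
marginal_cost += 1e-2 + 2e-3 * (np.random.random(len(marginal_cost)) - 0.5)
print(marginal_cost)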
- rhs = scaling * ( level * load - inflow ) - lhs_gen = linexpr((n.snapshot_weightings.generators * scaling, - get_var(n, "Generator", "p").T) - ).T.groupby(ggrouper, axis=1).apply(join_exprs) - lhs_spill = linexpr((-n.snapshot_weightings.stores * scaling, - get_var(n, "StorageUnit", "spill").T) - ).T.groupby(sgrouper, axis=1).apply(join_exprs) + load = ( + n.snapshot_weightings.generators + @ n.loads_t.p_set.groupby(lgrouper, axis=1).sum() + ) + inflow = ( + n.snapshot_weightings.stores + @ n.storage_units_t.inflow.groupby(sgrouper, axis=1).sum() + ) + inflow = inflow.reindex(load.index).fillna(0.0) + rhs = scaling * (level * load - inflow) + lhs_gen = ( + linexpr( + (n.snapshot_weightings.generators * scaling, get_var(n, "Generator", "p").T) + ) + .T.groupby(ggrouper, axis=1) + .apply(join_exprs) + ) + lhs_spill = ( + linexpr( + ( + -n.snapshot_weightings.stores * scaling, + get_var(n, "StorageUnit", "spill").T, + ) + ) + .T.groupby(sgrouper, axis=1) + .apply(join_exprs) + ) lhs_spill = lhs_spill.reindex(lhs_gen.index).fillna("") lhs = lhs_gen + lhs_spill define_constraints(n, lhs, ">=", rhs, "equity", "min") def add_BAU_constraints(n, config): - mincaps = pd.Series(config['electricity']['BAU_mincapacities']) - lhs = (linexpr((1, get_var(n, 'Generator', 'p_nom'))) - .groupby(n.generators.carrier).apply(join_exprs)) - define_constraints(n, lhs, '>=', mincaps[lhs.index], 'Carrier', 'bau_mincaps') + mincaps = pd.Series(config["electricity"]["BAU_mincapacities"]) + lhs = ( + linexpr((1, get_var(n, "Generator", "p_nom"))) + .groupby(n.generators.carrier) + .apply(join_exprs) + ) + define_constraints(n, lhs, ">=", mincaps[lhs.index], "Carrier", "bau_mincaps") def add_SAFE_constraints(n, config): - peakdemand = (1. + config['electricity']['SAFE_reservemargin']) *\ - n.loads_t.p_set.sum(axis=1).max() - conv_techs = config['plotting']['conv_techs'] - exist_conv_caps = n.generators.query('~p_nom_extendable & carrier in @conv_techs')\ - .p_nom.sum() - ext_gens_i = n.generators.query('carrier in @conv_techs & p_nom_extendable').index - lhs = linexpr((1, get_var(n, 'Generator', 'p_nom')[ext_gens_i])).sum() + peakdemand = ( + 1.0 + config["electricity"]["SAFE_reservemargin"] + ) * n.loads_t.p_set.sum(axis=1).max() + conv_techs = config["plotting"]["conv_techs"] + exist_conv_caps = n.generators.query( + "~p_nom_extendable & carrier in @conv_techs" + ).p_nom.sum() + ext_gens_i = n.generators.query("carrier in @conv_techs & p_nom_extendable").index + lhs = linexpr((1, get_var(n, "Generator", "p_nom")[ext_gens_i])).sum() rhs = peakdemand - exist_conv_caps - define_constraints(n, lhs, '>=', rhs, 'Safe', 'mintotalcap') + define_constraints(n, lhs, ">=", rhs, "Safe", "mintotalcap") def add_battery_constraints(n): nodes = n.buses.index[n.buses.carrier == "battery"] - if nodes.empty or ('Link', 'p_nom') not in n.variables.index: + if nodes.empty or ("Link", "p_nom") not in n.variables.index: return link_p_nom = get_var(n, "Link", "p_nom") - lhs = linexpr((1,link_p_nom[nodes + " charger"]), - (-n.links.loc[nodes + " discharger", "efficiency"].values, - link_p_nom[nodes + " discharger"].values)) - define_constraints(n, lhs, "=", 0, 'Link', 'charger_ratio') + lhs = linexpr( + (1, link_p_nom[nodes + " charger"]), + ( + -n.links.loc[nodes + " discharger", "efficiency"].values, + link_p_nom[nodes + " discharger"].values, + ), + ) + define_constraints(n, lhs, "=", 0, "Link", "charger_ratio") def extra_functionality(n, snapshots): @@ -230,11 +276,11 @@ def extra_functionality(n, snapshots): """ opts = n.opts config = 
n.config - if 'BAU' in opts and n.generators.p_nom_extendable.any(): + if "BAU" in opts and n.generators.p_nom_extendable.any(): add_BAU_constraints(n, config) - if 'SAFE' in opts and n.generators.p_nom_extendable.any(): + if "SAFE" in opts and n.generators.p_nom_extendable.any(): add_SAFE_constraints(n, config) - if 'CCL' in opts and n.generators.p_nom_extendable.any(): + if "CCL" in opts and n.generators.p_nom_extendable.any(): add_CCL_constraints(n, config) for o in opts: if "EQ" in o: @@ -242,50 +288,71 @@ def extra_functionality(n, snapshots): add_battery_constraints(n) -def solve_network(n, config, opts='', **kwargs): - solver_options = config['solving']['solver'].copy() - solver_name = solver_options.pop('name') - cf_solving = config['solving']['options'] - track_iterations = cf_solving.get('track_iterations', False) - min_iterations = cf_solving.get('min_iterations', 4) - max_iterations = cf_solving.get('max_iterations', 6) +def solve_network(n, config, opts="", **kwargs): + solver_options = config["solving"]["solver"].copy() + solver_name = solver_options.pop("name") + cf_solving = config["solving"]["options"] + track_iterations = cf_solving.get("track_iterations", False) + min_iterations = cf_solving.get("min_iterations", 4) + max_iterations = cf_solving.get("max_iterations", 6) # add to network for extra_functionality n.config = config n.opts = opts - if cf_solving.get('skip_iterations', False): - network_lopf(n, solver_name=solver_name, solver_options=solver_options, - extra_functionality=extra_functionality, **kwargs) + if cf_solving.get("skip_iterations", False): + network_lopf( + n, + solver_name=solver_name, + solver_options=solver_options, + extra_functionality=extra_functionality, + **kwargs + ) else: - ilopf(n, solver_name=solver_name, solver_options=solver_options, - track_iterations=track_iterations, - min_iterations=min_iterations, - max_iterations=max_iterations, - extra_functionality=extra_functionality, **kwargs) + ilopf( + n, + solver_name=solver_name, + solver_options=solver_options, + track_iterations=track_iterations, + min_iterations=min_iterations, + max_iterations=max_iterations, + extra_functionality=extra_functionality, + **kwargs + ) return n if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('solve_network', network='elec', simpl='', - clusters='5', ll='copt', opts='Co2L-BAU-CCL-24H') + + snakemake = mock_snakemake( + "solve_network", + network="elec", + simpl="", + clusters="5", + ll="copt", + opts="Co2L-BAU-CCL-24H", + ) configure_logging(snakemake) - tmpdir = snakemake.config['solving'].get('tmpdir') + tmpdir = snakemake.config["solving"].get("tmpdir") if tmpdir is not None: Path(tmpdir).mkdir(parents=True, exist_ok=True) - opts = snakemake.wildcards.opts.split('-') - solve_opts = snakemake.config['solving']['options'] + opts = snakemake.wildcards.opts.split("-") + solve_opts = snakemake.config["solving"]["options"] - fn = getattr(snakemake.log, 'memory', None) - with memory_logger(filename=fn, interval=30.) 
as mem: + fn = getattr(snakemake.log, "memory", None) + with memory_logger(filename=fn, interval=30.0) as mem: n = pypsa.Network(snakemake.input[0]) n = prepare_network(n, solve_opts) - n = solve_network(n, config=snakemake.config, opts=opts, - solver_dir=tmpdir, - solver_logfile=snakemake.log.solver) + n = solve_network( + n, + config=snakemake.config, + opts=opts, + solver_dir=tmpdir, + solver_logfile=snakemake.log.solver, + ) n.export_to_netcdf(snakemake.output[0]) logger.info("Maximum memory usage: {}".format(mem.mem_usage)) diff --git a/scripts/solve_operations_network.py b/scripts/solve_operations_network.py index 74506e5a3..2fe292d21 100644 --- a/scripts/solve_operations_network.py +++ b/scripts/solve_operations_network.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -57,50 +58,68 @@ logger = logging.getLogger(__name__) + def set_parameters_from_optimized(n, n_optim): - lines_typed_i = n.lines.index[n.lines.type != ''] - n.lines.loc[lines_typed_i, 'num_parallel'] = \ - n_optim.lines['num_parallel'].reindex(lines_typed_i, fill_value=0.) - n.lines.loc[lines_typed_i, 's_nom'] = ( - np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) * - n.lines.bus0.map(n.buses.v_nom) * n.lines.num_parallel) - - lines_untyped_i = n.lines.index[n.lines.type == ''] - for attr in ('s_nom', 'r', 'x'): - n.lines.loc[lines_untyped_i, attr] = \ - n_optim.lines[attr].reindex(lines_untyped_i, fill_value=0.) - n.lines['s_nom_extendable'] = False + lines_typed_i = n.lines.index[n.lines.type != ""] + n.lines.loc[lines_typed_i, "num_parallel"] = n_optim.lines["num_parallel"].reindex( + lines_typed_i, fill_value=0.0 + ) + n.lines.loc[lines_typed_i, "s_nom"] = ( + np.sqrt(3) + * n.lines["type"].map(n.line_types.i_nom) + * n.lines.bus0.map(n.buses.v_nom) + * n.lines.num_parallel + ) + + lines_untyped_i = n.lines.index[n.lines.type == ""] + for attr in ("s_nom", "r", "x"): + n.lines.loc[lines_untyped_i, attr] = n_optim.lines[attr].reindex( + lines_untyped_i, fill_value=0.0 + ) + n.lines["s_nom_extendable"] = False links_dc_i = n.links.index[n.links.p_nom_extendable] - n.links.loc[links_dc_i, 'p_nom'] = \ - n_optim.links['p_nom_opt'].reindex(links_dc_i, fill_value=0.) - n.links.loc[links_dc_i, 'p_nom_extendable'] = False + n.links.loc[links_dc_i, "p_nom"] = n_optim.links["p_nom_opt"].reindex( + links_dc_i, fill_value=0.0 + ) + n.links.loc[links_dc_i, "p_nom_extendable"] = False gen_extend_i = n.generators.index[n.generators.p_nom_extendable] - n.generators.loc[gen_extend_i, 'p_nom'] = \ - n_optim.generators['p_nom_opt'].reindex(gen_extend_i, fill_value=0.) - n.generators.loc[gen_extend_i, 'p_nom_extendable'] = False + n.generators.loc[gen_extend_i, "p_nom"] = n_optim.generators["p_nom_opt"].reindex( + gen_extend_i, fill_value=0.0 + ) + n.generators.loc[gen_extend_i, "p_nom_extendable"] = False stor_units_extend_i = n.storage_units.index[n.storage_units.p_nom_extendable] - n.storage_units.loc[stor_units_extend_i, 'p_nom'] = \ - n_optim.storage_units['p_nom_opt'].reindex(stor_units_extend_i, fill_value=0.) 
- n.storage_units.loc[stor_units_extend_i, 'p_nom_extendable'] = False + n.storage_units.loc[stor_units_extend_i, "p_nom"] = n_optim.storage_units[ + "p_nom_opt" + ].reindex(stor_units_extend_i, fill_value=0.0) + n.storage_units.loc[stor_units_extend_i, "p_nom_extendable"] = False stor_extend_i = n.stores.index[n.stores.e_nom_extendable] - n.stores.loc[stor_extend_i, 'e_nom'] = \ - n_optim.stores['e_nom_opt'].reindex(stor_extend_i, fill_value=0.) - n.stores.loc[stor_extend_i, 'e_nom_extendable'] = False + n.stores.loc[stor_extend_i, "e_nom"] = n_optim.stores["e_nom_opt"].reindex( + stor_extend_i, fill_value=0.0 + ) + n.stores.loc[stor_extend_i, "e_nom_extendable"] = False return n + if __name__ == "__main__": - if 'snakemake' not in globals(): + if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('solve_operations_network', network='elec', - simpl='', clusters='5', ll='copt', opts='Co2L-BAU-24H') + + snakemake = mock_snakemake( + "solve_operations_network", + network="elec", + simpl="", + clusters="5", + ll="copt", + opts="Co2L-BAU-24H", + ) configure_logging(snakemake) - tmpdir = snakemake.config['solving'].get('tmpdir') + tmpdir = snakemake.config["solving"].get("tmpdir") if tmpdir is not None: Path(tmpdir).mkdir(parents=True, exist_ok=True) @@ -110,15 +129,19 @@ def set_parameters_from_optimized(n, n_optim): del n_optim config = snakemake.config - opts = snakemake.wildcards.opts.split('-') - config['solving']['options']['skip_iterations'] = False - - fn = getattr(snakemake.log, 'memory', None) - with memory_logger(filename=fn, interval=30.) as mem: - n = prepare_network(n, solve_opts=snakemake.config['solving']['options']) - n = solve_network(n, config=config, opts=opts, - solver_dir=tmpdir, - solver_logfile=snakemake.log.solver) + opts = snakemake.wildcards.opts.split("-") + config["solving"]["options"]["skip_iterations"] = False + + fn = getattr(snakemake.log, "memory", None) + with memory_logger(filename=fn, interval=30.0) as mem: + n = prepare_network(n, solve_opts=snakemake.config["solving"]["options"]) + n = solve_network( + n, + config=config, + opts=opts, + solver_dir=tmpdir, + solver_logfile=snakemake.log.solver, + ) n.export_to_netcdf(snakemake.output[0]) logger.info("Maximum memory usage: {}".format(mem.mem_usage)) diff --git a/test/config.test1.yaml b/test/config.test1.yaml index 2986037bd..364b9c82e 100755 --- a/test/config.test1.yaml +++ b/test/config.test1.yaml @@ -74,8 +74,7 @@ renewable: corine: # Scholz, Y. (2012). Renewable energy based electricity supply at low costs: # development of the REMix model and application for Europe. ( p.42 / p.28) - grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 31, 32] + grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32] distance: 1000 distance_grid_codes: [1, 2, 3, 4, 5, 6] natura: true @@ -121,8 +120,7 @@ renewable: # sector: The economic potential of photovoltaics and concentrating solar # power." Applied Energy 135 (2014): 704-720. 
correction_factor: 0.854337 - corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, - 14, 15, 16, 17, 18, 19, 20, 26, 31, 32] + corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 26, 31, 32] natura: true potential: simple # or conservative clip_p_max_pu: 1.e-2 @@ -149,7 +147,7 @@ transformers: type: '' load: - power_statistics: True # only for files from <2019; set false in order to get ENTSOE transparency data + power_statistics: true # only for files from <2019; set false in order to get ENTSOE transparency data interpolate_limit: 3 # data gaps up until this size are interpolated linearly time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from manual_adjustments: true # false @@ -200,7 +198,7 @@ solving: plotting: map: figsize: [7, 7] - boundaries: [-10.2, 29, 35, 72] + boundaries: [-10.2, 29, 35, 72] p_nom: bus_size_factor: 5.e+4 linewidth_factor: 3.e+3 @@ -219,50 +217,50 @@ plotting: AC_carriers: ["AC line", "AC transformer"] link_carriers: ["DC line", "Converter AC-DC"] tech_colors: - "onwind" : "#235ebc" - "onshore wind" : "#235ebc" - 'offwind' : "#6895dd" - 'offwind-ac' : "#6895dd" - 'offshore wind' : "#6895dd" - 'offshore wind ac' : "#6895dd" - 'offwind-dc' : "#74c6f2" - 'offshore wind dc' : "#74c6f2" - "hydro" : "#08ad97" - "hydro+PHS" : "#08ad97" - "PHS" : "#08ad97" - "hydro reservoir" : "#08ad97" - 'hydroelectricity' : '#08ad97' - "ror" : "#4adbc8" - "run of river" : "#4adbc8" - 'solar' : "#f9d002" - 'solar PV' : "#f9d002" - 'solar thermal' : '#ffef60' - 'biomass' : '#0c6013' - 'solid biomass' : '#06540d' - 'biogas' : '#23932d' - 'waste' : '#68896b' - 'geothermal' : '#ba91b1' - "OCGT" : "#d35050" - "gas" : "#d35050" - "natural gas" : "#d35050" - "CCGT" : "#b20101" - "nuclear" : "#ff9000" - "coal" : "#707070" - "lignite" : "#9e5a01" - "oil" : "#262626" - "H2" : "#ea048a" - "hydrogen storage" : "#ea048a" - "battery" : "#b8ea04" - "Electric load" : "#f9d002" - "electricity" : "#f9d002" - "lines" : "#70af1d" - "transmission lines" : "#70af1d" - "AC-AC" : "#70af1d" - "AC line" : "#70af1d" - "links" : "#8a1caf" - "HVDC links" : "#8a1caf" - "DC-DC" : "#8a1caf" - "DC link" : "#8a1caf" + "onwind": "#235ebc" + "onshore wind": "#235ebc" + 'offwind': "#6895dd" + 'offwind-ac': "#6895dd" + 'offshore wind': "#6895dd" + 'offshore wind ac': "#6895dd" + 'offwind-dc': "#74c6f2" + 'offshore wind dc': "#74c6f2" + "hydro": "#08ad97" + "hydro+PHS": "#08ad97" + "PHS": "#08ad97" + "hydro reservoir": "#08ad97" + 'hydroelectricity': '#08ad97' + "ror": "#4adbc8" + "run of river": "#4adbc8" + 'solar': "#f9d002" + 'solar PV': "#f9d002" + 'solar thermal': '#ffef60' + 'biomass': '#0c6013' + 'solid biomass': '#06540d' + 'biogas': '#23932d' + 'waste': '#68896b' + 'geothermal': '#ba91b1' + "OCGT": "#d35050" + "gas": "#d35050" + "natural gas": "#d35050" + "CCGT": "#b20101" + "nuclear": "#ff9000" + "coal": "#707070" + "lignite": "#9e5a01" + "oil": "#262626" + "H2": "#ea048a" + "hydrogen storage": "#ea048a" + "battery": "#b8ea04" + "Electric load": "#f9d002" + "electricity": "#f9d002" + "lines": "#70af1d" + "transmission lines": "#70af1d" + "AC-AC": "#70af1d" + "AC line": "#70af1d" + "links": "#8a1caf" + "HVDC links": "#8a1caf" + "DC-DC": "#8a1caf" + "DC link": "#8a1caf" nice_names: OCGT: "Open-Cycle Gas" CCGT: "Combined-Cycle Gas" From aa9e25337b3badf1e2f509c154aef04564e19e88 Mon Sep 17 00:00:00 2001 From: euronion <42553970+euronion@users.noreply.github.com> Date: Wed, 12 Jan 2022 11:39:38 +0100 Subject: [PATCH 14/20] Fix CI failing due to misaligned 
indentation from YAML formatting environment.yaml. glpk and ipopt should be installed via conda, due to indentation mismatch they were installed via pip (which failed). --- .github/workflows/ci.yaml | 2 +- envs/environment.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index c8a605630..8686ad047 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -44,7 +44,7 @@ jobs: - name: Install dependencies run: | echo -ne "url: ${CDSAPI_URL}\nkey: ${CDSAPI_TOKEN}\n" > ~/.cdsapirc - echo -e " - glpk\n - ipopt<3.13.3" >> envs/environment.yaml + echo -e "- glpk\n- ipopt<3.13.3" >> envs/environment.yaml mamba env update -f envs/environment.yaml --name test - name: Test snakemake workflow diff --git a/envs/environment.yaml b/envs/environment.yaml index 692def7c2..d62820bfd 100644 --- a/envs/environment.yaml +++ b/envs/environment.yaml @@ -1,8 +1,8 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT -name: pypsa-eur-2 +name: pypsa-eur channels: - conda-forge - bioconda From 04e022c2e3bb65b5bb2ba061ae5dfd5980064e94 Mon Sep 17 00:00:00 2001 From: euronion <42553970+euronion@users.noreply.github.com> Date: Wed, 12 Jan 2022 14:31:46 +0100 Subject: [PATCH 15/20] Add iosort and codespell for pre-commit. --- .pre-commit-config.yaml | 36 ++++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b392bd3a0..aa17c48d6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,6 +13,21 @@ repos: - id: end-of-file-fixer - id: fix-encoding-pragma +# Sort package imports alphabetically +- repo: https://github.com/PyCQA/isort + rev: 5.10.1 + hooks: + - id: isort + args: ["--profile", "black", "--filter-files"] + +# Find common spelling mistakes in comments and docstrings +- repo: https://github.com/codespell-project/codespell + rev: v2.1.0 + hooks: + - id: codespell + types_or: [python, rst, markdown] + files: ^(scripts|doc)/ + # Formatting with "black" coding style - repo: https://github.com/psf/black rev: 21.12b0 @@ -22,6 +37,13 @@ repos: # Format Jupyter Python notebooks - id: black-jupyter +# Do YAML formatting (before the linter checks it for misses) +- repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks + rev: v2.2.0 + hooks: + - id: pretty-format-yaml + args: [--autofix, --indent, '2', --preserve-quotes] + # Use yamllint to check for valid YAML files and list syntax errors - repo: https://github.com/adrienverge/yamllint.git rev: v1.26.3 @@ -29,17 +51,11 @@ repos: - id: yamllint args: [--format, parsable, -c=.yamllint] -- repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks - rev: v2.2.0 - hooks: - - id: pretty-format-yaml - args: [--autofix, --indent, '2', --preserve-quotes] - # Format Snakemake rule / workflow files -- repo: https://github.com/snakemake/snakefmt - rev: 0.4.4 - hooks: - - id: snakefmt +- repo: https://github.com/snakemake/snakefmt + rev: 0.4.4 + hooks: + - id: snakefmt # Check for FSFE REUSE compliance (licensing) - repo: https://github.com/fsfe/reuse-tool From 4aa7578dac35ef85c49192aa96d2253b9e33631e Mon Sep 17 00:00:00 2001 From: euronion <42553970+euronion@users.noreply.github.com> Date: Wed, 12 Jan 2022 14:58:30 +0100 Subject: [PATCH 16/20] Have codespell ignore country codes and all CAPS words. 
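The pattern handed to --ignore-regex in the hunk below, \b[A-Z]+\b, matches whole tokens that consist only of capital letters, which is what lets codespell skip two-letter country codes such as "NO" or "DE". A minimal sketch of what the pattern itself picks up, using plain Python re semantics (how codespell then consumes the matched spans is an assumption and not reproduced here):

    import re

    # Same pattern as in the codespell hook arguments; assumption: codespell
    # simply skips every span matched by --ignore-regex when spell-checking.
    IGNORE = re.compile(r"\b[A-Z]+\b")

    sample = 'countries = ["NO", "DE", "GB"]  # NO is Norway, not a negation'
    print(IGNORE.findall(sample))  # ['NO', 'DE', 'GB', 'NO'] -- all-caps tokens only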
--- .pre-commit-config.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index aa17c48d6..b91afb1c3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -25,6 +25,7 @@ repos: rev: v2.1.0 hooks: - id: codespell + args: ['--ignore-regex="\b[A-Z]+\b"'] # Ignore capital case words, e.g. country codes types_or: [python, rst, markdown] files: ^(scripts|doc)/ From 73caf5308907df7f89de63d112899de81434a7f1 Mon Sep 17 00:00:00 2001 From: euronion <42553970+euronion@users.noreply.github.com> Date: Wed, 12 Jan 2022 14:58:41 +0100 Subject: [PATCH 17/20] Fix typos detected by codespell. --- doc/cloudcomputing.rst | 4 ++-- doc/contributing.rst | 2 +- doc/release_notes.rst | 4 ++-- doc/tutorial.rst | 2 +- doc/wildcards.rst | 2 +- scripts/make_summary.py | 4 ++-- scripts/simplify_network.py | 2 +- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/doc/cloudcomputing.rst b/doc/cloudcomputing.rst index f751d6244..6df8b2b33 100644 --- a/doc/cloudcomputing.rst +++ b/doc/cloudcomputing.rst @@ -72,7 +72,7 @@ Step 3 - Installation of Cloud SDK - Download Google Cloud SDK `SDK `_. Check that you are logged in in your Google account. The link should lead you to the Windows installation of Google Cloud SDK. - Follow the "Quickstart for Windows - Before you begin" steps. -- After the successfull installation and initialization, close the Google Cloud SDK reopen it again. Type the following command into the "Google Cloud SDK Shell": +- After the successful installation and initialization, close the Google Cloud SDK reopen it again. Type the following command into the "Google Cloud SDK Shell": .. code:: bash @@ -107,7 +107,7 @@ Make sure that your instance is operating for the next steps. - Click on the advanced setting. SSH -> Authentication. - Option 1. Click on the Tools button and "Install Public Key into Server..". Somewhere in your folder structure must be a public key. I found it with the following folder syntax on my local windows computer -> :\Users\...\.ssh (there should be a PKK file). - Option 2. Click on the Tools button and "Generate new key pair...". Save the private key at a folder you remember and add it to the "private key file" field in WinSCP. Upload the public key to the metadeta of your instance. -- Click ok and save. Then click Login. If successfull WinSCP will open on the left side your local computer folder structure and on the right side the folder strucutre of your VM. (If you followed Option 2 and its not initially working. Stop your instance, refresh the website, reopen the WinSCP field. Afterwards your your Login should be successfull) +- Click ok and save. Then click Login. If successful WinSCP will open on the left side your local computer folder structure and on the right side the folder structure of your VM. (If you followed Option 2 and its not initially working. Stop your instance, refresh the website, reopen the WinSCP field. Afterwards your your Login should be successful) If you had struggle with the above steps, you could also try `this video `_. diff --git a/doc/contributing.rst b/doc/contributing.rst index d57f12127..aa1111e98 100644 --- a/doc/contributing.rst +++ b/doc/contributing.rst @@ -19,7 +19,7 @@ to our `GitHub repository `_. * We encourage you to use the `PEP 8 coding style `_. For all code contributions we follow the four eyes principle (two person principle), i.e. all suggested code -including our own are reviewed by a second person before they are incoporated into our repository. 
+including our own are reviewed by a second person before they are incorporated into our repository. If you are unfamiliar with pull requests, the GitHub help pages have a nice `guide `_. diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 0423a581d..82bc2f071 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -60,7 +60,7 @@ PyPSA-Eur 0.4.0 (22th September 2021) [`#261 `_]. * The tutorial cutout was renamed from ``cutouts/europe-2013-era5.nc`` to - ``cutouts/europe-2013-era5-tutorial.nc`` to accomodate tutorial and productive + ``cutouts/europe-2013-era5-tutorial.nc`` to accommodate tutorial and productive cutouts side-by-side. * The flag ``keep_all_available_areas`` in the configuration for renewable @@ -254,7 +254,7 @@ PyPSA-Eur 0.2.0 (8th June 2020) * Removed the ``id`` column for custom power plants in ``data/custom_powerplants.csv`` to avoid custom power plants with conflicting ids getting attached to the wrong bus [`#131 `_]. -* Add option ``renewables: {carrier}: keep_all_available_areas:`` to use all availabe weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed [`#150 `_]. +* Add option ``renewables: {carrier}: keep_all_available_areas:`` to use all available weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed [`#150 `_]. * Added a function ``_helpers.load_network()`` which loads a network with overridden components specified in ``snakemake.config['override_components']`` [`#128 `_]. diff --git a/doc/tutorial.rst b/doc/tutorial.rst index 17d4e3c17..fb4cc113b 100644 --- a/doc/tutorial.rst +++ b/doc/tutorial.rst @@ -247,7 +247,7 @@ Once the whole worktree is finished, it should show state so in the terminal: You will notice that many intermediate stages are saved, namely the outputs of each individual ``snakemake`` rule. -You can produce any output file occuring in the ``Snakefile`` by running +You can produce any output file occurring in the ``Snakefile`` by running .. code:: bash diff --git a/doc/wildcards.rst b/doc/wildcards.rst index 2290de67f..3b3d7eef2 100644 --- a/doc/wildcards.rst +++ b/doc/wildcards.rst @@ -123,7 +123,7 @@ These cutouts will be stored in a folder specified by ``{cutout}``. The ``{technology}`` wildcard ============================= -The ``{technology}`` wildcard specifies for which renewable energy technology to produce availablity time +The ``{technology}`` wildcard specifies for which renewable energy technology to produce availability time series and potentials using the rule :mod:`build_renewable_profiles`. It can take the values ``onwind``, ``offwind-ac``, ``offwind-dc``, and ``solar`` but **not** ``hydro`` (since hydroelectric plant profiles are created by a different rule). diff --git a/scripts/make_summary.py b/scripts/make_summary.py index fe63d4edc..f0510ecea 100644 --- a/scripts/make_summary.py +++ b/scripts/make_summary.py @@ -33,7 +33,7 @@ Description ----------- -The following rule can be used to summarize the results in seperate .csv files: +The following rule can be used to summarize the results in separate .csv files: .. 
code:: @@ -46,7 +46,7 @@ the line volume/cost cap field can be set to one of the following: * ``lv1.25`` for a particular line volume extension by 25% * ``lc1.25`` for a line cost extension by 25 % -* ``lall`` for all evalutated caps +* ``lall`` for all evaluated caps * ``lvall`` for all line volume caps * ``lcall`` for all line cost caps diff --git a/scripts/simplify_network.py b/scripts/simplify_network.py index 4a11ccb4f..4d655bb0d 100644 --- a/scripts/simplify_network.py +++ b/scripts/simplify_network.py @@ -76,7 +76,7 @@ 1. Create an equivalent transmission network in which all voltage levels are mapped to the 380 kV level by the function ``simplify_network(...)``. -2. DC only sub-networks that are connected at only two buses to the AC network are reduced to a single representative link in the function ``simplify_links(...)``. The components attached to buses in between are moved to the nearest endpoint. The grid connection cost of offshore wind generators are added to the captial costs of the generator. +2. DC only sub-networks that are connected at only two buses to the AC network are reduced to a single representative link in the function ``simplify_links(...)``. The components attached to buses in between are moved to the nearest endpoint. The grid connection cost of offshore wind generators are added to the capital costs of the generator. 3. Stub lines and links, i.e. dead-ends of the network, are sequentially removed from the network in the function ``remove_stubs(...)``. Components are moved along. From e2245c7513f6b99dd52e3ab611d72b0279b76e17 Mon Sep 17 00:00:00 2001 From: euronion <42553970+euronion@users.noreply.github.com> Date: Wed, 12 Jan 2022 15:00:57 +0100 Subject: [PATCH 18/20] Run isort on all Python files. --- doc/conf.py | 2 +- scripts/_helpers.py | 9 ++++++--- scripts/add_electricity.py | 13 ++++++------- scripts/add_extra_components.py | 11 +++++------ scripts/base_network.py | 20 ++++++++++---------- scripts/build_bus_regions.py | 9 ++++----- scripts/build_cutout.py | 2 +- scripts/build_hydro_profile.py | 2 +- scripts/build_load_data.py | 7 +++---- scripts/build_natura_raster.py | 2 +- scripts/build_powerplants.py | 9 ++++----- scripts/build_renewable_profiles.py | 14 +++++++------- scripts/build_shapes.py | 11 +++++------ scripts/cluster_network.py | 22 +++++++++------------- scripts/make_summary.py | 7 +++---- scripts/plot_network.py | 19 +++++++++---------- scripts/plot_p_nom_max.py | 6 +++--- scripts/plot_summary.py | 6 +++--- scripts/prepare_links_p_nom.py | 2 +- scripts/prepare_network.py | 7 +++---- scripts/retrieve_databundle.py | 4 ++-- scripts/simplify_network.py | 21 +++++++++------------ scripts/solve_network.py | 14 ++++++-------- scripts/solve_operations_network.py | 9 ++++----- 24 files changed, 106 insertions(+), 122 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index c2004235e..62c560661 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -17,9 +17,9 @@ # All configuration values have a default; values that are commented out # serve to show the default. -import sys import os import shlex +import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. 
If the directory is relative to the diff --git a/scripts/_helpers.py b/scripts/_helpers.py index 053ca0a4b..2c8985008 100644 --- a/scripts/_helpers.py +++ b/scripts/_helpers.py @@ -3,9 +3,10 @@ # # SPDX-License-Identifier: MIT -import pandas as pd from pathlib import Path +import pandas as pd + def configure_logging(snakemake, skip_handlers=False): """ @@ -111,7 +112,7 @@ def pdbcast(v, h): def load_network_for_plots(fn, tech_costs, config, combine_hydro_ps=True): import pypsa - from add_electricity import update_transmission_costs, load_costs + from add_electricity import load_costs, update_transmission_costs n = pypsa.Network(fn) @@ -251,6 +252,7 @@ def aggregate_costs(n, flatten=False, opts=None, existing_only=False): def progress_retrieve(url, file): import urllib + from progressbar import ProgressBar pbar = ProgressBar(0, 100) @@ -277,8 +279,9 @@ def mock_snakemake(rulename, **wildcards): keyword arguments fixing the wildcards. Only necessary if wildcards are needed. """ - import snakemake as sm import os + + import snakemake as sm from pypsa.descriptors import Dict from snakemake.script import Snakemake diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py index 22accfffd..262bdbb74 100755 --- a/scripts/add_electricity.py +++ b/scripts/add_electricity.py @@ -85,19 +85,18 @@ """ import logging -from _helpers import configure_logging, update_p_nom_max -import pypsa -import pandas as pd -import numpy as np -import xarray as xr import geopandas as gpd +import numpy as np +import pandas as pd import powerplantmatching as pm +import pypsa +import xarray as xr +from _helpers import configure_logging, update_p_nom_max from powerplantmatching.export import map_country_bus - +from vresutils import transfer as vtransfer from vresutils.costdata import annuity from vresutils.load import timeseries_opsd -from vresutils import transfer as vtransfer idx = pd.IndexSlice diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index e57ff04d6..8d9e5d1f2 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -51,16 +51,15 @@ - ``Stores`` of carrier 'H2' and/or 'battery' in combination with ``Links``. If this option is chosen, the script adds extra buses with corresponding carrier where energy ``Stores`` are attached and which are connected to the corresponding power buses via two links, one each for charging and discharging. This leads to three investment variables for the energy capacity, charging and discharging capacity of the storage unit. 
""" import logging -from _helpers import configure_logging -import pypsa -import pandas as pd import numpy as np - +import pandas as pd +import pypsa +from _helpers import configure_logging from add_electricity import ( - load_costs, - add_nice_carrier_names, _add_missing_carriers_from_costs, + add_nice_carrier_names, + load_costs, ) idx = pd.IndexSlice diff --git a/scripts/base_network.py b/scripts/base_network.py index f1cf31170..ebd73c5ed 100644 --- a/scripts/base_network.py +++ b/scripts/base_network.py @@ -64,21 +64,21 @@ """ import logging -from _helpers import configure_logging +from itertools import product -import pypsa -import yaml -import pandas as pd import geopandas as gpd -import numpy as np import networkx as nx - +import numpy as np +import pandas as pd +import pypsa +import shapely +import shapely.prepared +import shapely.wkt +import yaml +from _helpers import configure_logging from scipy import spatial from scipy.sparse import csgraph -from itertools import product - -from shapely.geometry import Point, LineString -import shapely, shapely.prepared, shapely.wkt +from shapely.geometry import LineString, Point logger = logging.getLogger(__name__) diff --git a/scripts/build_bus_regions.py b/scripts/build_bus_regions.py index 6b03a23a9..53f16b24b 100644 --- a/scripts/build_bus_regions.py +++ b/scripts/build_bus_regions.py @@ -43,13 +43,12 @@ """ import logging -from _helpers import configure_logging - -import pypsa import os -import pandas as pd -import geopandas as gpd +import geopandas as gpd +import pandas as pd +import pypsa +from _helpers import configure_logging from vresutils.graph import voronoi_partition_pts logger = logging.getLogger(__name__) diff --git a/scripts/build_cutout.py b/scripts/build_cutout.py index 5194fca9f..fa224a76d 100644 --- a/scripts/build_cutout.py +++ b/scripts/build_cutout.py @@ -93,12 +93,12 @@ """ import logging + import atlite import geopandas as gpd import pandas as pd from _helpers import configure_logging - logger = logging.getLogger(__name__) if __name__ == "__main__": diff --git a/scripts/build_hydro_profile.py b/scripts/build_hydro_profile.py index 95f9a66a6..b29b76665 100644 --- a/scripts/build_hydro_profile.py +++ b/scripts/build_hydro_profile.py @@ -61,10 +61,10 @@ """ import logging -from _helpers import configure_logging import atlite import geopandas as gpd +from _helpers import configure_logging from vresutils import hydro as vhydro logger = logging.getLogger(__name__) diff --git a/scripts/build_load_data.py b/scripts/build_load_data.py index 1b5cd4bc9..e72cef3d6 100755 --- a/scripts/build_load_data.py +++ b/scripts/build_load_data.py @@ -39,11 +39,10 @@ import logging logger = logging.getLogger(__name__) -from _helpers import configure_logging - -import pandas as pd -import numpy as np import dateutil +import numpy as np +import pandas as pd +from _helpers import configure_logging from pandas import Timedelta as Delta diff --git a/scripts/build_natura_raster.py b/scripts/build_natura_raster.py index d7120b8d1..a982c457a 100644 --- a/scripts/build_natura_raster.py +++ b/scripts/build_natura_raster.py @@ -41,11 +41,11 @@ """ import logging -from _helpers import configure_logging import atlite import geopandas as gpd import rasterio as rio +from _helpers import configure_logging from rasterio.features import geometry_mask from rasterio.warp import transform_bounds diff --git a/scripts/build_powerplants.py b/scripts/build_powerplants.py index cd64d073b..77b481fbb 100755 --- a/scripts/build_powerplants.py +++ 
b/scripts/build_powerplants.py @@ -73,13 +73,12 @@ """ import logging -from _helpers import configure_logging -import pypsa -import powerplantmatching as pm -import pandas as pd import numpy as np - +import pandas as pd +import powerplantmatching as pm +import pypsa +from _helpers import configure_logging from scipy.spatial import cKDTree as KDTree logger = logging.getLogger(__name__) diff --git a/scripts/build_renewable_profiles.py b/scripts/build_renewable_profiles.py index e32311f89..f4d4418f4 100644 --- a/scripts/build_renewable_profiles.py +++ b/scripts/build_renewable_profiles.py @@ -180,18 +180,18 @@ reached. """ -import progressbar as pgb -import geopandas as gpd -import xarray as xr -import numpy as np import functools -import atlite import logging -from pypsa.geo import haversine -from shapely.geometry import LineString import time +import atlite +import geopandas as gpd +import numpy as np +import progressbar as pgb +import xarray as xr from _helpers import configure_logging +from pypsa.geo import haversine +from shapely.geometry import LineString logger = logging.getLogger(__name__) diff --git a/scripts/build_shapes.py b/scripts/build_shapes.py index fd9a916bd..6c975c475 100644 --- a/scripts/build_shapes.py +++ b/scripts/build_shapes.py @@ -69,19 +69,18 @@ """ import logging -from _helpers import configure_logging - import os -import numpy as np -from operator import attrgetter from functools import reduce from itertools import takewhile +from operator import attrgetter -import pandas as pd import geopandas as gpd +import numpy as np +import pandas as pd +import pycountry as pyc +from _helpers import configure_logging from shapely.geometry import MultiPolygon, Polygon from shapely.ops import cascaded_union -import pycountry as pyc logger = logging.getLogger(__name__) diff --git a/scripts/cluster_network.py b/scripts/cluster_network.py index ed8db1ecb..85b423d4e 100644 --- a/scripts/cluster_network.py +++ b/scripts/cluster_network.py @@ -123,30 +123,26 @@ """ import logging -from _helpers import configure_logging, update_p_nom_max - -import pypsa import os -import shapely +from functools import reduce -import pandas as pd -import numpy as np import geopandas as gpd -import pyomo.environ as po import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import pyomo.environ as po +import pypsa import seaborn as sns - -from functools import reduce - +import shapely +from _helpers import configure_logging, update_p_nom_max +from add_electricity import load_costs from pypsa.networkclustering import ( + _make_consense, busmap_by_kmeans, busmap_by_spectral_clustering, - _make_consense, get_clustering_from_busmap, ) -from add_electricity import load_costs - idx = pd.IndexSlice logger = logging.getLogger(__name__) diff --git a/scripts/make_summary.py b/scripts/make_summary.py index f0510ecea..275907e77 100644 --- a/scripts/make_summary.py +++ b/scripts/make_summary.py @@ -55,12 +55,11 @@ """ import logging -from _helpers import configure_logging - import os -import pypsa -import pandas as pd +import pandas as pd +import pypsa +from _helpers import configure_logging from add_electricity import load_costs, update_transmission_costs idx = pd.IndexSlice diff --git a/scripts/plot_network.py b/scripts/plot_network.py index eb409579e..28cfef434 100755 --- a/scripts/plot_network.py +++ b/scripts/plot_network.py @@ -21,21 +21,20 @@ """ import logging + +import cartopy.crs as ccrs +import matplotlib as mpl +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd from 
_helpers import ( - load_network_for_plots, - aggregate_p, aggregate_costs, + aggregate_p, configure_logging, + load_network_for_plots, ) - -import pandas as pd -import numpy as np - -import cartopy.crs as ccrs -import matplotlib.pyplot as plt -import matplotlib as mpl -from matplotlib.patches import Circle, Ellipse from matplotlib.legend_handler import HandlerPatch +from matplotlib.patches import Circle, Ellipse to_rgba = mpl.colors.colorConverter.to_rgba diff --git a/scripts/plot_p_nom_max.py b/scripts/plot_p_nom_max.py index 948540770..a968058e2 100644 --- a/scripts/plot_p_nom_max.py +++ b/scripts/plot_p_nom_max.py @@ -20,11 +20,11 @@ """ import logging -from _helpers import configure_logging -import pypsa -import pandas as pd import matplotlib.pyplot as plt +import pandas as pd +import pypsa +from _helpers import configure_logging logger = logging.getLogger(__name__) diff --git a/scripts/plot_summary.py b/scripts/plot_summary.py index 3cba60b6b..384116f06 100644 --- a/scripts/plot_summary.py +++ b/scripts/plot_summary.py @@ -20,12 +20,12 @@ """ -import os import logging -from _helpers import configure_logging +import os -import pandas as pd import matplotlib.pyplot as plt +import pandas as pd +from _helpers import configure_logging logger = logging.getLogger(__name__) diff --git a/scripts/prepare_links_p_nom.py b/scripts/prepare_links_p_nom.py index a51b1abea..05be23a43 100644 --- a/scripts/prepare_links_p_nom.py +++ b/scripts/prepare_links_p_nom.py @@ -38,9 +38,9 @@ """ import logging -from _helpers import configure_logging import pandas as pd +from _helpers import configure_logging logger = logging.getLogger(__name__) diff --git a/scripts/prepare_network.py b/scripts/prepare_network.py index e5ac4db14..e969bd239 100755 --- a/scripts/prepare_network.py +++ b/scripts/prepare_network.py @@ -57,13 +57,12 @@ """ import logging -from _helpers import configure_logging - import re -import pypsa + import numpy as np import pandas as pd - +import pypsa +from _helpers import configure_logging from add_electricity import load_costs, update_transmission_costs idx = pd.IndexSlice diff --git a/scripts/retrieve_databundle.py b/scripts/retrieve_databundle.py index 2272b730c..789baafe3 100644 --- a/scripts/retrieve_databundle.py +++ b/scripts/retrieve_databundle.py @@ -34,11 +34,11 @@ """ import logging -from _helpers import progress_retrieve, configure_logging - import tarfile from pathlib import Path +from _helpers import configure_logging, progress_retrieve + logger = logging.getLogger(__name__) diff --git a/scripts/simplify_network.py b/scripts/simplify_network.py index 4d655bb0d..fdb0074d5 100644 --- a/scripts/simplify_network.py +++ b/scripts/simplify_network.py @@ -84,27 +84,24 @@ """ import logging -from _helpers import configure_logging, update_p_nom_max - -from cluster_network import clustering_for_n_clusters, cluster_regions -from add_electricity import load_costs - -import pandas as pd -import numpy as np -import scipy as sp -from scipy.sparse.csgraph import connected_components, dijkstra - from functools import reduce +import numpy as np +import pandas as pd import pypsa +import scipy as sp +from _helpers import configure_logging, update_p_nom_max +from add_electricity import load_costs +from cluster_network import cluster_regions, clustering_for_n_clusters from pypsa.io import import_components_from_dataframe, import_series_from_dataframe from pypsa.networkclustering import ( - busmap_by_stubs, + _make_consense, aggregategenerators, aggregateoneport, + busmap_by_stubs, 
get_clustering_from_busmap, - _make_consense, ) +from scipy.sparse.csgraph import connected_components, dijkstra logger = logging.getLogger(__name__) diff --git a/scripts/solve_network.py b/scripts/solve_network.py index 22da63c46..717d4cecb 100755 --- a/scripts/solve_network.py +++ b/scripts/solve_network.py @@ -78,23 +78,21 @@ """ import logging -from _helpers import configure_logging +import re +from pathlib import Path import numpy as np import pandas as pd -import re - import pypsa +from _helpers import configure_logging from pypsa.linopf import ( - get_var, define_constraints, - linexpr, + get_var, + ilopf, join_exprs, + linexpr, network_lopf, - ilopf, ) - -from pathlib import Path from vresutils.benchmark import memory_logger logger = logging.getLogger(__name__) diff --git a/scripts/solve_operations_network.py b/scripts/solve_operations_network.py index 2fe292d21..d60d77054 100644 --- a/scripts/solve_operations_network.py +++ b/scripts/solve_operations_network.py @@ -47,14 +47,13 @@ """ import logging -from _helpers import configure_logging +from pathlib import Path -import pypsa import numpy as np - -from pathlib import Path +import pypsa +from _helpers import configure_logging +from solve_network import prepare_network, solve_network from vresutils.benchmark import memory_logger -from solve_network import solve_network, prepare_network logger = logging.getLogger(__name__) From 0fcb44d53008f70c95304bdf35f6904118bc7e4f Mon Sep 17 00:00:00 2001 From: euronion <42553970+euronion@users.noreply.github.com> Date: Wed, 12 Jan 2022 15:08:32 +0100 Subject: [PATCH 19/20] Add pre-commit hook for fixing midex-line-ending . --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b91afb1c3..45555346b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -9,9 +9,9 @@ repos: rev: v4.1.0 hooks: - id: check-merge-conflict - - id: check-yaml - id: end-of-file-fixer - id: fix-encoding-pragma + - id: mixed-line-ending # Sort package imports alphabetically - repo: https://github.com/PyCQA/isort From 8b2ceda94840fd316ad606e537ba302a27ff0e8e Mon Sep 17 00:00:00 2001 From: euronion <42553970+euronion@users.noreply.github.com> Date: Thu, 13 Jan 2022 18:25:09 +0100 Subject: [PATCH 20/20] Add check for large files (>2MB). --- .pre-commit-config.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 45555346b..ba5b9c128 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -12,6 +12,8 @@ repos: - id: end-of-file-fixer - id: fix-encoding-pragma - id: mixed-line-ending + - id: check-added-large-files + args: ['--maxkb=2000'] # Sort package imports alphabetically - repo: https://github.com/PyCQA/isort
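The check-added-large-files hook added in the final hunk rejects files above the configured --maxkb threshold (2000 kB here) when they are staged in git. As a rough standalone illustration of the size rule only — not the hook's actual implementation, and without the git-staging logic — a Python sketch could look like this:

    import os

    MAX_KB = 2000  # same threshold as the --maxkb argument above

    def oversized(root="."):
        # Walk the working tree and yield any regular file larger than the threshold.
        for dirpath, _, filenames in os.walk(root):
            for name in filenames:
                path = os.path.join(dirpath, name)
                if os.path.isfile(path) and os.path.getsize(path) > MAX_KB * 1024:
                    yield path

    for path in oversized():
        print("too large:", path)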