diff --git a/.github/workflows/devcontainer.yml b/.github/workflows/devcontainer.yml index 85de566fe..ec5b4cdf0 100644 --- a/.github/workflows/devcontainer.yml +++ b/.github/workflows/devcontainer.yml @@ -14,10 +14,10 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@v1 + uses: actions/checkout@v4 - name: Login to GitHub Container Registry - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ secrets.REGISTRY_USER }} diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 9ebd17f4d..8cf4c6ce4 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -7,9 +7,11 @@ jobs: contrib-readme-job: runs-on: ubuntu-latest name: A job to automate contrib in readme - if: ${{ github.repository_owner == 'pypsa-meets-earth' && github.ref == 'refs/heads/main'}} + if: ${{ github.event_name == 'workflow_dispatch' || (github.repository_owner == 'pypsa-meets-earth' && github.ref == 'refs/heads/main')}} steps: - name: Contribute List uses: akhilmhdh/contributors-readme-action@v2.3.10 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + use_username: true diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index a243a304b..8ab61e1e7 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -31,8 +31,8 @@ jobs: env_file: envs/linux-pinned.yaml - os: macos env_file: envs/macos-pinned.yaml - # - os: windows - # env_file: envs/windows-pinned.yaml + - os: windows + env_file: envs/windows-pinned.yaml defaults: run: diff --git a/.readthedocs.yaml b/.readthedocs.yaml index e04e28caf..2bac06440 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -4,6 +4,9 @@ version: 2 +sphinx: + configuration: doc/conf.py + build: os: ubuntu-22.04 tools: diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 000000000..9b29144b4 --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,43 @@ +# SPDX-FileCopyrightText: PyPSA-Earth and PyPSA-Eur Authors +# SPDX-License-Identifier: AGPL-3.0-or-later + +title: PyPSA-Earth. A new global open energy system optimization model demonstrated in Africa +abstract: This repository contains the source code of the paper PyPSA-Earth. A new global open energy system optimization model demonstrated in Africa by Maximilian Parzen, Hazem Abdel-Khalek, Ekaterina Fedotova, Matin Mahmood, Martha Maria Frysztacki, Johannes Hampp, Lukas Franken, Leon Schumm, Fabian Neumann, Davide Poli, Aristides Kiprakis and Davide Fioriti, published in Applied Energy +doi: 10.1016/j.apenergy.2023.121096 +repository-code: https://github.com/pypsa-meets-earth/pypsa-earth +version: 1.0.0 +date-released: 2023-04-18 +message: If you use this software in your work, please cite it using the following metadata. 
+authors: + - given-names: Maximilian + family-names: Parzen + orcid: https://orcid.org/0000-0002-4390-0063 + - given-names: Hazem + family-names: Abdel-Khalek + - given-names: Ekaterina + family-names: Fedotova + orcid: https://orcid.org/0000-0002-5590-9591 + - given-names: Matin + family-names: Mahmood + - given-names: Martha Maria + family-names: Frysztacki + orcid: https://orcid.org/0000-0002-0788-1328 + - given-names: Johannes + family-names: Hampp + orcid: https://orcid.org/0000-0002-1776-116X + - given-names: Lukas + family-names: Franken + - given-names: Leon + family-names: Schumm + - given-names: Fabian + family-names: Neumann + orcid: https://orcid.org/0000-0002-6604-5450 + - given-names: Davide + family-names: Poli + orcid: https://orcid.org/0000-0002-5045-9034 + - given-names: Aristides + family-names: Kiprakis + - given-names: Davide + family-names: Fioriti + orcid: https://orcid.org/0000-0001-5491-7912 +cff-version: 1.2.0 diff --git a/README.md b/README.md index 34b998185..0aac8b5db 100644 --- a/README.md +++ b/README.md @@ -24,6 +24,8 @@ by [![pre-commit.ci status](https://results.pre-commit.ci/badge/github/pypsa-meets-earth/pypsa-earth/main.svg)](https://results.pre-commit.ci/latest/github/pypsa-meets-earth/pypsa-earth/main) [![Discord](https://img.shields.io/discord/911692131440148490?logo=discord)](https://discord.gg/AnuJBk23FU) [![Google Drive](https://img.shields.io/badge/Google%20Drive-4285F4?style=flat&logo=googledrive&logoColor=white)](https://drive.google.com/drive/folders/13Z8Y9zgsh5IZaDNkkRyo1wkoMgbdUxT5?usp=sharing) +[![DOI](https://img.shields.io/badge/DOI-10.1016%2Fj.apenergy.2023.121096-blue)](https://doi.org/10.1016/j.apenergy.2023.121096) + **PyPSA-Earth: A Global Sector-Coupled Open-Source Multi-Energy System Model** @@ -58,41 +60,22 @@ The diagram below depicts one representative clustered node for the sector-coupl -## Get involved - -There are multiple ways to get involved and learn more about our work. That's how we organise ourselves: - -- [**Discord NEW! (Open)**](https://discord.gg/AnuJBk23FU) - - chat with the community, team up on features, exchange with developers, code in voice channels - - registration and usage is for free -

- - - -

- **General initiative meeting (Open)** - every forth Thursday each month Thursday 16-17:00 (UK time) - `download .ics` - - join for project news and high-level code updates - meeting hosted on Discord - [open agenda](https://docs.google.com/document/d/1r6wm2RBe0DWFngmItpFfSFHA-CnUmVcVTkIKmthdW3g/edit?usp=sharing). See what we will discuss. Invited members have edit rights. -- **Buddy talk (Open)** - book a 30min meeting with Max to discuss anything you like - booking link: [calendly.com/pypsa-meets-earth](https://calendly.com/max-parzen/pypsa-meets-earth-exchange-30min) -- **Specific code meeting (Open)** - meeting hosted on Discord - join updates, demos, Q&A's, discussions and the coordination of each work package - 1. Demand creation and prediction meeting, on demand - 2. AI asset detection meeting, on demand - 3. Sector coupling meeting, every Thursday 09:00 (UK time), `download .ics` - 4. PyPSA-Earth meeting, every Thursday 16:00 (UK time), `download .ics` -- **Outreach meeting (Open)** - every second week, Tuesday 17:00 (UK time) - planning, discussing events, workshops, communication, community activities -- [**Google Drive**](https://drive.google.com/drive/folders/13Z8Y9zgsh5IZaDNkkRyo1wkoMgbdUxT5?usp=sharing) - access to minutes, presentations, lists, documents (access to minutes) +## How to get involved + +There are multiple ways to get involved and learn more about our work: +1. **Join our forum** and communication platform on the [**PyPSA-meets-Earth**](https://discord.gg/AnuJBk23FU) Discord Server. +2. **Chat on Discord with us** in the following open meetings: + - **General initiative meeting** for project news and [high-level code updates](https://docs.google.com/document/d/1r6wm2RBe0DWFngmItpFfSFHA-CnUmVcVTkIKmthdW3g/edit?usp=sharing). It is held every [fourth Thursday 16-17:00 (UK time)](https://drive.google.com/file/d/1naH4WwW9drkOkOJ3PLO4fyWdkZQi5-_w/view?usp=share_link) and is a perfect place to meet the community and get a high-level update on the PyPSA ecosystem relevant to PyPSA-Earth developments. + - **Weekly developers meetings** + - Eastern-Hemisphere friendly *Morning meeting* every [Thursday at 09:00 (UK time)](https://drive.google.com/file/d/1PDdmjsKhzyGRo0_YrP4wPQkn2XTNh6jA/view?usp=share_link). + - Western-Hemisphere friendly *Evening meeting* every [Thursday 16:00 (UK time)](https://drive.google.com/file/d/1gaLmyV4qGPXsogkeRcAPWjC0ESebUxU-/view?usp=share_link). Every fourth Thursday is replaced by the General initiative meeting, which takes a more high-level perspective, but you can also join it to discuss more specific questions. +3. **Look at public materials** on [**Google Drive**](https://drive.google.com/drive/folders/13Z8Y9zgsh5IZaDNkkRyo1wkoMgbdUxT5?usp=sharing) to access minutes, presentations, lists and documents. Feel free to have a look! +4. **Express your interest** in the following on-demand meetings: + - Demand creation and prediction meeting + - AI asset detection meeting + - Outreach meeting for planning, discussing events, workshops, communication, community activities +5. Join us and **propose your stream**. ## Installation @@ -140,10 +123,10 @@ There are multiple ways to get involved and learn more about our work. That's ho ## Running the model in previous versions -The model can be run in previous versions by checking out the respective tag. 
For instance, to run the model in version 0.4.1, which is the last version before the repo `pypsa-earth-sec` was merged, the following command can be used: +The model can be run in previous versions by checking out the respective tag. For instance, to run the model in version 0.6.0, which is the last version before the repo `pypsa-earth-sec` was merged, the following command can be used: ```bash -git checkout v0.4.1 +git checkout v0.6.0 ``` After checking out the tag, the model can be run as usual. Please make sure to install the required packages for the respective version. @@ -187,322 +170,396 @@ The documentation is available here: [documentation](https://pypsa-earth.readthe - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
- - SermishaNarayana -
- Null -
-
- - davide-f -
- Davide-f -
-
- - ekatef -
- Ekaterina -
-
- - pz-max -
- Max Parzen -
-
- - DeniseGiub -
- DeniseGiub -
-
- - yerbol-akhmetov -
- Yerbol Akhmetov -
-
- - GbotemiB -
- Emmanuel Bolarinwa -
-
- - mnm-matin -
- Mnm-matin -
-
- - hazemakhalek -
- Hazem -
-
- - energyLS -
- EnergyLS -
-
- - Tomkourou -
- Thomas Kouroughli -
-
- - GridGrapher -
- GridGrapher -
-
- - martacki -
- Martha Frysztacki -
-
- - finozzifa -
- Finozzifa -
-
- - Emre-Yorat89 -
- Emre_Yorat -
-
- - virio-andreyana -
- Null -
-
- - giacfalk -
- Giacomo Falchetta -
-
- - Ekaterina-Vo -
- Ekaterina-Vo -
-
- - cpschau -
- Cschau -
-
- - euronion -
- Euronion -
-
- - AnasAlgarei -
- AnasAlgarei -
-
- - LukasFrankenQ -
- Lukas Franken -
-
- - Tooblippe -
- Tobias -
-
- - doneachh -
- Anton Achhammer -
-
- - koen-vg -
- Koen Van Greevenbroek -
-
- - carlosfv92 -
- Carlos Fernandez -
-
- - koen-vg -
- Koen Van Greevenbroek -
-
- - asolavi -
- Null -
-
- - Netotse -
- Null -
-
- - pitmonticone -
- Pietro Monticone -
-
- - siddharth-krishna -
- Siddharth Krishna -
-
- - squoilin -
- Sylvain Quoilin -
-
- - juli-a-ko -
- Juli-a-ko -
-
- - ollie-bell -
- Null -
-
- - rsparks3 -
- Ryan -
-
- - stephenjlee -
- Stephen J Lee -
-
- - kma33 -
- Katherine M. Antonio -
-
- - jessLryan -
- Jess -
-
- - jarry7 -
- Jarrad Wright -
-
- - HanaElattar -
- HanaElattar -
-
- - FabianHofmann -
- Fabian Hofmann -
-
- - EmreYorat -
- EmreYorat -
-
- - AndreCNF -
- André Cristóvão Neves Ferreira -
-
- - AlexanderMeisinger -
- Null -
-
+ + FabianHofmann +
+ Fabian Hofmann +
+
+ + fneum +
+ Fabian Neumann +
+
+ + ekatef +
+ Ekaterina +
+
+ + euronion +
+ Euronion +
+
+ + Justus-coded +
+ Justus Ilemobayo +
+
+ + mnm-matin +
+ Mnm-matin +
+
+ + martacki +
+ Martha Frysztacki +
+
+ + LukasFrankenQ +
+ Lukas Franken +
+
+ + pz-max +
+ Max Parzen +
+
+ + davide-f +
+ Davide-f +
+
+ + koen-vg +
+ Koen Van Greevenbroek +
+
+ + Eddy-JV +
+ Eddy Jalbout +
+
+ + hazemakhalek +
+ Hazem +
+
+ + energyLS +
+ EnergyLS +
+
+ + AnasAlgarei +
+ AnasAlgarei +
+
+ + yerbol-akhmetov +
+ Yerbol Akhmetov +
+
+ + GbotemiB +
+ Emmanuel Bolarinwa +
+
+ + DeniseGiub +
+ DeniseGiub +
+
+ + doneachh +
+ Anton Achhammer +
+
+ + Tomkourou +
+ Thomas Kouroughli +
+
+ + finozzifa +
+ Finozzifa +
+
+ + GridGrapher +
+ GridGrapher +
+
+ + drifter089 +
+ Akshat Mittal +
+
+ + glenkiely-ieg +
+ glenkiely-ieg +
+
+ + cpschau +
+ Cschau +
+
+ + Emre-Yorat89 +
+ Emre_Yorat +
+
+ + virio-andreyana +
+ Null +
+
+ + giacfalk +
+ Giacomo Falchetta +
+
+ + Ekaterina-Vo +
+ Ekaterina-Vo +
+
+ + lkstrp +
+ Lukas Trippe +
+
+ + Tooblippe +
+ Tobias +
+
+ + arizeosalac +
+ zeosalac +
+
+ + danielelerede-oet +
+ danielelerede-oet +
+
+ + carlosfv92 +
+ Carlos Fernandez +
+
+ + rajesh-ieg +
+ rajesh-ieg +
+
+ + asolavi +
+ Null +
+
+ + stephenjlee +
+ Stephen J Lee +
+
+ + rsparks3 +
+ Ryan +
+
+ + ollie-bell +
+ Null +
+
+ + juli-a-ko +
+ Juli-a-ko +
+
+ + squoilin +
+ Sylvain Quoilin +
+
+ + siddharth-krishna +
+ Siddharth Krishna +
+
+ + SermishaNarayana +
+ Null +
+
+ + pitmonticone +
+ Pietro Monticone +
+
+ + Netotse +
+ Null +
+
+ + milyas009 +
+ Muhammad Ilyas +
+
+ + kma33 +
+ Katherine M. Antonio +
+
+ + jessLryan +
+ Jess +
+
+ + jarry7 +
+ Jarrad Wright +
+
+ + HanaElattar +
+ HanaElattar +
+
+ + EmreYorat +
+ EmreYorat +
+
+ + AndreCNF +
+ André Cristóvão Neves Ferreira +
+
+ + AlexanderMeisinger +
+ Null +
+
diff --git a/Snakefile b/Snakefile index 589471ef3..b54a82e7a 100644 --- a/Snakefile +++ b/Snakefile @@ -16,6 +16,7 @@ from _helpers import ( get_last_commit_message, check_config_version, copy_default_files, + BASE_DIR, ) from build_demand_profiles import get_load_paths_gegis from retrieve_databundle_light import datafiles_retrivedatabundle @@ -53,12 +54,11 @@ CDIR = RDIR if not run.get("shared_cutouts") else "" SECDIR = run["sector_name"] + "/" if run.get("sector_name") else "" SDIR = config["summary_dir"].strip("/") + f"/{SECDIR}" RESDIR = config["results_dir"].strip("/") + f"/{SECDIR}" -COSTDIR = config["costs_dir"] load_data_paths = get_load_paths_gegis("data", config) if config["enable"].get("retrieve_cost_data", True): - COSTS = "resources/" + RDIR + "costs.csv" + COSTS = "resources/" + RDIR + f"costs_{config['costs']['year']}.csv" else: COSTS = "data/costs.csv" ATLITE_NPROCESSES = config["atlite"].get("nprocesses", 4) @@ -392,29 +392,18 @@ if not config["enable"].get("build_natura_raster", False): if config["enable"].get("retrieve_cost_data", True): rule retrieve_cost_data: + params: + version=config["costs"]["version"], input: HTTP.remote( - f"raw.githubusercontent.com/PyPSA/technology-data/{config['costs']['version']}/outputs/costs_{config['costs']['year']}.csv", + f"raw.githubusercontent.com/PyPSA/technology-data/{config['costs']['version']}/outputs/" + + "costs_{year}.csv", keep_local=True, ), output: - COSTS, + "resources/" + RDIR + "costs_{year}.csv", log: - "logs/" + RDIR + "retrieve_cost_data.log", - resources: - mem_mb=5000, - run: - move(input[0], output[0]) - - rule retrieve_cost_data_flexible: - input: - HTTP.remote( - f"raw.githubusercontent.com/PyPSA/technology-data/{config['costs']['version']}/outputs/costs" - + "_{planning_horizons}.csv", - keep_local=True, - ), - output: - costs=COSTDIR + "costs_{planning_horizons}.csv", + "logs/" + RDIR + "retrieve_cost_data_{year}.log", resources: mem_mb=5000, run: @@ -564,6 +553,7 @@ rule add_electricity: rule simplify_network: params: + aggregation_strategies=config["cluster_options"]["aggregation_strategies"], renewable=config["renewable"], geo_crs=config["crs"]["geo_crs"], cluster_options=config["cluster_options"], @@ -606,6 +596,7 @@ if config["augmented_line_connection"].get("add_to_snakefile", False) == True: rule cluster_network: params: + aggregation_strategies=config["cluster_options"]["aggregation_strategies"], build_shape_options=config["build_shape_options"], electricity=config["electricity"], costs=config["costs"], @@ -691,6 +682,7 @@ if config["augmented_line_connection"].get("add_to_snakefile", False) == False: rule cluster_network: params: + aggregation_strategies=config["cluster_options"]["aggregation_strategies"], build_shape_options=config["build_shape_options"], electricity=config["electricity"], costs=config["costs"], @@ -744,7 +736,10 @@ if config["augmented_line_connection"].get("add_to_snakefile", False) == False: rule add_extra_components: + params: + transmission_efficiency=config["sector"]["transmission_efficiency"], input: + overrides="data/override_component_attrs", network="networks/" + RDIR + "elec_s{simpl}_{clusters}.nc", tech_costs=COSTS, output: @@ -818,6 +813,7 @@ if config["monte_carlo"]["options"].get("add_to_snakefile", False) == False: solving=config["solving"], augmented_line_connection=config["augmented_line_connection"], input: + overrides=BASE_DIR + "/data/override_component_attrs", network="networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", output: "results/" + RDIR + 
"networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", @@ -884,6 +880,7 @@ if config["monte_carlo"]["options"].get("add_to_snakefile", False) == True: solving=config["solving"], augmented_line_connection=config["augmented_line_connection"], input: + overrides=BASE_DIR + "/data/override_component_attrs", network="networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}.nc", @@ -913,7 +910,7 @@ if config["monte_carlo"]["options"].get("add_to_snakefile", False) == True: resources: mem_mb=memory, shadow: - "shallow" + "copy-minimal" if os.name == "nt" else "shallow" script: "scripts/solve_network.py" @@ -1004,7 +1001,7 @@ rule prepare_ports: params: custom_export=config["custom_data"]["export_ports"], output: - ports="data/ports.csv", # TODO move from data to resources + ports="resources/" + SECDIR + "ports.csv", export_ports="resources/" + SECDIR + "export_ports.csv", script: "scripts/prepare_ports.py" @@ -1015,14 +1012,14 @@ rule prepare_airports: airport_sizing_factor=config["sector"]["airport_sizing_factor"], airport_custom_data=config["custom_data"]["airports"], output: - ports="data/airports.csv", # TODO move from data to resources + ports="resources/" + SECDIR + "airports.csv", script: "scripts/prepare_airports.py" rule prepare_urban_percent: output: - urban_percent="data/urban_percent.csv", # TODO move from data to resources + urban_percent="resources/" + SECDIR + "urban_percent.csv", script: "scripts/prepare_urban_percent.py" @@ -1071,7 +1068,7 @@ rule prepare_sector_network: input: network=RESDIR + "prenetworks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{sopts}_{planning_horizons}_{discountrate}_{demand}_presec.nc", - costs=COSTDIR + "costs_{planning_horizons}.csv", + costs="resources/" + RDIR + "costs_{planning_horizons}.csv", h2_cavern="data/hydrogen_salt_cavern_potentials.csv", nodal_energy_totals="resources/" + SECDIR @@ -1095,9 +1092,11 @@ rule prepare_sector_network: industrial_demand="resources/" + SECDIR + "demand/industrial_energy_demand_per_node_elec_s{simpl}_{clusters}_{planning_horizons}_{demand}.csv", - energy_totals="data/energy_totals_{demand}_{planning_horizons}.csv", - airports="data/airports.csv", - ports="data/ports.csv", + energy_totals="resources/" + + SECDIR + + "energy_totals_{demand}_{planning_horizons}.csv", + airports="resources/" + SECDIR + "airports.csv", + ports="resources/" + SECDIR + "ports.csv", heat_demand="resources/" + SECDIR + "demand/heat/heat_demand_{demand}_s{simpl}_{clusters}_{planning_horizons}.csv", @@ -1151,7 +1150,7 @@ rule build_ship_profile: rule add_export: params: - gadm_level=config["sector"]["gadm_level"], + gadm_layer_id=config["build_shape_options"]["gadm_layer_id"], alternative_clustering=config["cluster_options"]["alternative_clustering"], store=config["export"]["store"], store_capital_costs=config["export"]["store_capital_costs"], @@ -1163,7 +1162,7 @@ rule add_export: input: overrides="data/override_component_attrs", export_ports="resources/" + SECDIR + "export_ports.csv", - costs=COSTDIR + "costs_{planning_horizons}.csv", + costs="resources/" + RDIR + "costs_{planning_horizons}.csv", ship_profile="resources/" + SECDIR + "ship_profile_{h2export}TWh.csv", network=RESDIR + "prenetworks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{sopts}_{planning_horizons}_{discountrate}_{demand}.nc", @@ -1201,7 +1200,9 @@ rule override_respot: }, overrides="data/override_component_attrs", network="networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", - energy_totals="data/energy_totals_{demand}_{planning_horizons}.csv", + 
energy_totals="resources/" + + SECDIR + + "energy_totals_{demand}_{planning_horizons}.csv", output: RESDIR + "prenetworks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{sopts}_{planning_horizons}_{discountrate}_{demand}_presec.nc", @@ -1212,7 +1213,9 @@ rule override_respot: rule prepare_transport_data: input: network="networks/" + RDIR + "elec_s{simpl}_{clusters}.nc", - energy_totals_name="data/energy_totals_{demand}_{planning_horizons}.csv", + energy_totals_name="resources/" + + SECDIR + + "energy_totals_{demand}_{planning_horizons}.csv", traffic_data_KFZ="data/emobility/KFZ__count", traffic_data_Pkw="data/emobility/Pkw__count", transport_name="resources/" + SECDIR + "transport_data.csv", @@ -1296,7 +1299,9 @@ rule build_cop_profiles: rule prepare_heat_data: input: network="networks/" + RDIR + "elec_s{simpl}_{clusters}.nc", - energy_totals_name="data/energy_totals_{demand}_{planning_horizons}.csv", + energy_totals_name="resources/" + + SECDIR + + "energy_totals_{demand}_{planning_horizons}.csv", clustered_pop_layout="resources/" + SECDIR + "population_shares/pop_layout_elec_s{simpl}_{clusters}_{planning_horizons}.csv", @@ -1349,7 +1354,8 @@ rule build_base_energy_totals: input: unsd_paths="data/demand/unsd/paths/Energy_Statistics_Database.xlsx", output: - energy_totals_base="data/energy_totals_base.csv", + energy_totals_base="resources/" + SECDIR + "energy_totals_base.csv", + unsd_export_path=directory("data/demand/unsd/data/"), script: "scripts/build_base_energy_totals.py" @@ -1360,13 +1366,15 @@ rule prepare_energy_totals: base_year=config["demand_data"]["base_year"], sector_options=config["sector"], input: - unsd_paths="data/energy_totals_base.csv", + unsd_paths="resources/" + SECDIR + "energy_totals_base.csv", efficiency_gains_cagr="data/demand/efficiency_gains_cagr.csv", growth_factors_cagr="data/demand/growth_factors_cagr.csv", district_heating="data/demand/district_heating.csv", fuel_shares="data/demand/fuel_shares.csv", output: - energy_totals="data/energy_totals_{demand}_{planning_horizons}.csv", + energy_totals="resources/" + + SECDIR + + "energy_totals_{demand}_{planning_horizons}.csv", script: "scripts/prepare_energy_totals.py" @@ -1420,7 +1428,7 @@ rule build_population_layouts: planning_horizons=config["scenario"]["planning_horizons"][0], input: nuts3_shapes="resources/" + RDIR + "shapes/gadm_shapes.geojson", - urban_percent="data/urban_percent.csv", + urban_percent="resources/" + SECDIR + "urban_percent.csv", cutout="cutouts/" + CDIR + [c["cutout"] for _, c in config["renewable"].items()][0] @@ -1616,18 +1624,18 @@ if config["foresight"] == "overnight": solving=config["solving"], augmented_line_connection=config["augmented_line_connection"], input: - overrides="data/override_component_attrs", + overrides=BASE_DIR + "/data/override_component_attrs", # network=RESDIR # + "prenetworks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{sopts}_{planning_horizons}_{discountrate}.nc", network=RESDIR + "prenetworks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{sopts}_{planning_horizons}_{discountrate}_{demand}_{h2export}export.nc", - costs=COSTDIR + "costs_{planning_horizons}.csv", + costs="resources/" + RDIR + "costs_{planning_horizons}.csv", configs=SDIR + "configs/config.yaml", # included to trigger copy_config rule output: RESDIR + "postnetworks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{sopts}_{planning_horizons}_{discountrate}_{demand}_{h2export}export.nc", shadow: - "shallow" + "copy-minimal" if os.name == "nt" else "shallow" log: solver=RESDIR + 
"logs/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{sopts}_{planning_horizons}_{discountrate}_{demand}_{h2export}export_solver.log", @@ -1666,7 +1674,7 @@ rule make_sector_summary: **config["costs"], **config["export"], ), - costs=COSTDIR + "costs_{planning_horizons}.csv", + costs="resources/" + RDIR + "costs_{planning_horizons}.csv", plots=expand( RESDIR + "maps/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{sopts}-costs-all_{planning_horizons}_{discountrate}_{demand}_{h2export}export.pdf", @@ -1825,7 +1833,7 @@ rule prepare_db: rule build_industrial_distribution_key: #default data params: countries=config["countries"], - gadm_level=config["sector"]["gadm_level"], + gadm_layer_id=config["build_shape_options"]["gadm_layer_id"], alternative_clustering=config["cluster_options"]["alternative_clustering"], industry_database=config["custom_data"]["industry_database"], input: @@ -1865,9 +1873,10 @@ rule build_base_industry_totals: #default data countries=config["countries"], other_industries=config["demand_data"]["other_industries"], input: + #os.path.dirname(snakemake.input["transactions_path"]) + "/demand/unsd/data/" #industrial_production_per_country="data/industrial_production_per_country.csv", - #unsd_path="data/demand/unsd/data/", - energy_totals_base="data/energy_totals_base.csv", + unsd_export_path="data/demand/unsd/data/", + energy_totals_base="resources/" + SECDIR + "energy_totals_base.csv", transactions_path="data/unsd_transactions.csv", output: base_industry_totals="resources/" @@ -1903,7 +1912,7 @@ rule build_industry_demand: #default data + SECDIR + "demand/base_industry_totals_{planning_horizons}_{demand}.csv", industrial_database="data/industrial_database.csv", - costs=COSTDIR + "costs_{planning_horizons}.csv", + costs="resources/" + RDIR + "costs_{planning_horizons}.csv", industry_growth_cagr="data/demand/industry_growth_cagr.csv", output: industrial_energy_demand_per_node="resources/" @@ -2081,7 +2090,7 @@ if config["foresight"] == "myopic": "co2_sequestration_potential", 200 ), input: - overrides="data/override_component_attrs", + overrides=BASE_DIR + "/data/override_component_attrs", network=RESDIR + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sopts}_{planning_horizons}_{discountrate}_{demand}_{h2export}export.nc", costs=CDIR + "costs_{planning_horizons}.csv", @@ -2092,7 +2101,7 @@ if config["foresight"] == "myopic": # config=RESDIR # + "configs/config.elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{sopts}_{planning_horizons}_{discountrate}_{demand}_{h2export}export.yaml", shadow: - "shallow" + "copy-minimal" if os.name == "nt" else "shallow" log: solver=RESDIR + "logs/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{sopts}_{planning_horizons}_{discountrate}_{demand}_{h2export}export_solver.log", diff --git a/config.default.yaml b/config.default.yaml index 715a8a4a9..224f5f237 100644 --- a/config.default.yaml +++ b/config.default.yaml @@ -2,7 +2,7 @@ # # SPDX-License-Identifier: CC0-1.0 -version: 0.5.0 +version: 0.6.0 tutorial: false logging: @@ -11,7 +11,6 @@ logging: results_dir: results/ summary_dir: results/ -costs_dir: data/ # TODO change to the equivalent of technology data foresight: overnight @@ -85,7 +84,7 @@ cluster_options: remove_stubs_across_borders: true p_threshold_drop_isolated: 20 # [MW] isolated buses are being discarded if bus mean power is below the specified threshold p_threshold_merge_isolated: 300 # [MW] isolated buses are being merged into a single isolated bus if a bus mean power is below the specified threshold - s_threshold_fetch_isolated: 0.05 # [-] a 
share of the national load for merging an isolated network into a backbone network + s_threshold_fetch_isolated: false # [-] a share of the national load for merging an isolated network into a backbone network cluster_network: algorithm: kmeans feature: solar+onwind-time @@ -99,6 +98,7 @@ cluster_options: p_nom_max: sum p_nom_min: sum p_min_pu: mean + p_max_pu: weighted_average marginal_cost: mean committable: any ramp_limit_up: max @@ -358,7 +358,7 @@ renewable: # Costs Configuration costs: year: 2030 - version: v0.6.2 + version: v0.10.0 discountrate: [0.071] #, 0.086, 0.111] # [EUR/USD] ECB: https://www.ecb.europa.eu/stats/exchange/eurofxref/html/eurofxref-graph-usd.en.html # noqa: E501 USD2013_to_EUR2013: 0.7532 # [EUR/USD] ECB: https://www.ecb.europa.eu/stats/exchange/eurofxref/html/eurofxref-graph-usd.en.html @@ -603,6 +603,9 @@ sector: transmission_efficiency: electricity distribution grid: efficiency_static: 0.97 # efficiency of distribution grid (i.e. 3% losses) + H2 pipeline: + efficiency_per_1000km: 1 + compression_per_1000km: 0.017 # DEA technology data. Mean of Energy losses, lines 5000-20000 MW and lines >20000 MW for 2020, 2030 and 2050, [%/1000 km] dynamic_transport: enable: false # If "True", then the BEV and FCEV shares are obtained depending on the "Co2L"-wildcard (e.g. "Co2L0.70: 0.10"). If "False", then the shares are obtained depending on the "demand" wildcard and "planning_horizons" wildcard as listed below (e.g. "DF_2050: 0.08") @@ -658,7 +661,6 @@ sector: co2_network: true co2_sequestration_potential: 200 #MtCO2/a sequestration potential for Europe co2_sequestration_cost: 10 #EUR/tCO2 for sequestration of CO2 - hydrogen_underground_storage: true shipping_hydrogen_liquefaction: false shipping_average_efficiency: 0.4 #For conversion of fuel oil to propulsion in 2011 @@ -673,7 +675,6 @@ sector: NZ_2050: 0.36 DF_2050: 0.12 - gadm_level: 1 h2_cavern: true marginal_cost_storage: 0 methanation: true @@ -696,7 +697,6 @@ sector: biomass: biomass keep_existing_capacities: true - solving: options: formulation: kirchhoff diff --git a/config.tutorial.yaml b/config.tutorial.yaml index 7ada63032..8f31af9bf 100644 --- a/config.tutorial.yaml +++ b/config.tutorial.yaml @@ -2,7 +2,7 @@ # # SPDX-License-Identifier: CC0-1.0 -version: 0.5.0 +version: 0.6.0 tutorial: true diff --git a/data/custom_powerplants.csv b/data/custom_powerplants.csv index fb83a5ff4..d81c32bca 100644 --- a/data/custom_powerplants.csv +++ b/data/custom_powerplants.csv @@ -1 +1 @@ -Name,Fueltype,Technology,Set,Country,Capacity,Efficiency,Duration,Volume_Mm3,DamHeight_m,StorageCapacity_MWh,DateIn,DateRetrofit,DateMothball,DateOut,lat,lon,EIC,projectID,bus +Name,Fueltype,Technology,Set,Country,Capacity,Efficiency,Duration,Volume_Mm3,DamHeight_m,StorageCapacity_MWh,DateIn,DateRetrofit,DateOut,lat,lon,EIC,projectID,bus diff --git a/doc/conf.py b/doc/conf.py index cf4323277..b8ff6c30c 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -65,7 +65,7 @@ copyright = f"{datetime.datetime.today().year}, {author}" # The full version, including alpha/beta/rc tags -release = "0.5.0" +release = "0.6.0" # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" diff --git a/doc/index.rst b/doc/index.rst index 92e17a55a..25e43dfcb 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -78,39 +78,19 @@ The `website `_ provides more context of t Get Involved ============== -The PyPSA meets Earth team is currently running four types of meetings: - - -- `**Discord NEW! 
(Open)** `_ - - Chat with the community, team up on features, exchange with developers, code in voice channels - -- **General code meeting (Open)** - - every forth Thursday each month 16-17:00 (UK time) `download .ics file `_ - - updates on overall project and code blocks - - meeting hosted on `Discord `_; join us, we are waiting for you! - - `open agenda `_. See what we will discuss. Invited members have edit rights. - -- **Specific code meeting (by invitation)** - - - meeting hosted on Discord - - join updates, demos, Q&A's, discussions and the coordination of each work package - - 1. Demand creation and prediction meeting, on demand basis - 2. AI asset detection meeting, on demand basis - 3. Sector coupling meeting, every Thursday 09:00 (UK time), `download .ics file `__ - 4. PyPSA-Earth meeting, every Thursday 16:00 (UK time), `download .ics file `__ - -- **Outreach meeting (by invitation)** - - - every second week - - planning, discussing events, workshops, communication, community activities - -- **Buddy talk (Open)** - - - book a 30min meeting with Max to discuss anything you like - - booking link: `calendly.com/pypsa-meets-earth `_ +There are multiple ways to get involved and learn more about our work: + 1. **Join our forum** and communication platform on the `PyPSA-meets-Earth `__ Discord Server. + 2. **Chat on Discord with us** in the following meetings: + - General initiative meeting for project news and `high-level code updates `__. It is held every `fourth Thursday 16-17:00 (UK time) `__ and is a perfect place to meet the community and get a high-level update on the PyPSA ecosystem relevant to PyPSA-Earth developments. + - Weekly developers meetings + - Eastern-Hemisphere friendly *Morning meeting* every `Thursday at 09:00 (UK time) `__. + - Western-Hemisphere friendly *Evening meeting* every `Thursday 16:00 (UK time) `__. Every fourth Thursday is replaced by the General initiative meeting, which takes a more high-level perspective, but you can also join it to discuss more specific questions. + 3. **Look at public materials** on `Google Drive `__ to access minutes, presentations, lists and documents. Feel free to have a look! + 4. **Express your interest** in the following on-demand meetings: + - Demand creation and prediction meeting + - AI asset detection meeting + - Outreach meeting for planning, discussing events, workshops, communication, community activities + 5. Join us and **propose your stream**. ============= Documentation diff --git a/doc/release_notes.rst b/doc/release_notes.rst index cf5b93895..23a85145f 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -13,13 +13,45 @@ This part of documentation collects descriptive release notes to capture the mai **New Features and Major Changes** +* Drop duplication of retrieve_data and COST_DIR, add params and update technology-data version `PR #1249 `__ + +* In alternative clustering, generate hydro inflows by shape and avoid hydro inflows duplication for plants installed in the same node `PR #1120 `__ + +* Add a function to calculate length-based efficiencies and apply it to the H2 pipelines. 
`PR #1192 `__ + +* Support of Linopy for Power and Sector-Coupled Modelling and latest PyPSA version `PR #1172 `__ + +* Update workflow to geopandas >= 1.0 `PR #1276 `__ + +**Minor Changes and bug-fixing** + +* Align structure of the components with consistency checks in the updated PyPSA version `PR #1315 `__ + +* Prevent computation of powerplantmatching if replace option is selected for custom_powerplants `PR #1281 `__ + +* Fix overlapping bus regions when alternative clustering is selected `PR #1287 `__ + +* Fix readthedocs by explicitly specifying the location of the Sphinx config `PR #1292 `__ + +* Fix lossy bidirectional links, especially H2 pipelines, which would sometimes gain H2 instead of losing it. `PR #1192 `__ + +* Fix the need for administrative rights on Windows by changing all shadow directory settings for Windows in the Snakefile `PR #1295 `__ and `PR #1301 `__ + +* Add CITATION.cff to guide users on how to cite PyPSA-Earth. Insert the DOI badge in the README linking to the very first PyPSA-Earth paper. `PR #1316 `__ + +* Remove pyomo from cluster_network and introduce scip. `PR #1320 `__ + +PyPSA-Earth 0.6.0 +================= + +**New Features and Major Changes (24th December 2024)** + * Include option in the config to allow for custom airport data `PR #1241 `__ * Added Dev Containers and docker as an option to get started with pypsa-earth `PR #1228 `__ * Add a list of PyPSA-Earth applications in academic and industrial projects `PR #1255 `__ - * Computational improvements of build_osm_network `PR #845 `__ * Boost computational performances of set_lines_ids with cKDTree by scipy `PR #806 `__ @@ -134,6 +166,7 @@ PyPSA-Earth 0.4.0 * Add an option to use csv format for custom demand imports. `PR #995 `__ + + **Minor Changes and bug-fixing** * Minor bug-fixing to run the cluster wildcard min `PR #1019 `__ @@ -537,13 +570,13 @@ Release Process * Make sure that pinned versions of the environments ``*-pinned.yaml`` in the ``envs`` folder are up-to-date. -* Update version number in ``doc/conf.py`` and ``*config.*.yaml``. +* Update version number in ``doc/conf.py``, ``default.config.yaml``, ``tutorial.config.yaml`` and ``test/config.*.yaml``. * Open, review and merge pull request for branch ``release-v0.x.x``. Make sure to close issues and PRs or the release milestone with it (e.g. closes #X). Run ``pre-commit run --all`` locally and fix any issues. -* Tag a release on Github via ``git tag v0.x.x``, ``git push``, ``git push --tags``. Include release notes in the tag message. +* Update and checkout your local `main` and tag a release with ``git tag v0.x.x``, ``git push``, ``git push --tags``. Include release notes in the tag message using the GitHub UI. * Upload code to `zenodo code repository `_ with `GPLv3 license `_. 
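As an illustration of the release-tagging step described in the release process above, a minimal sketch (``v0.x.x`` is a placeholder for the actual release version):

```bash
# Sketch of the release tagging step from the release process above;
# v0.x.x is a placeholder for the actual version number.
git checkout main            # switch to the local main branch
git pull                     # bring it up to date with the remote
git tag v0.x.x               # tag the release commit
git push && git push --tags  # publish the branch and the tag
```

The release notes themselves are then attached to the tag via the GitHub UI, as noted above.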
diff --git a/envs/environment.yaml b/envs/environment.yaml index dd5670196..4322c7fd1 100644 --- a/envs/environment.yaml +++ b/envs/environment.yaml @@ -12,7 +12,8 @@ dependencies: - pip - mamba # esp for windows build -- pypsa>=0.24, <0.25 +- pypsa>=0.25 +# - atlite>=0.2.4 # until https://github.com/PyPSA/atlite/issues/244 is merged - dask # currently the packages are being installed with pip # need to move back to conda once the issues will be resolved @@ -28,6 +29,7 @@ dependencies: - memory_profiler - ruamel.yaml<=0.17.26 - pytables +- pyscipopt # added to comply with the quadratic objective requirement of the clustering script - lxml - numpy # starting from 1.3.5 numpoly requires numpy>2.0 which leads to issues @@ -42,13 +44,14 @@ dependencies: - pydoe2 - shapely!=2.0.4 - pre-commit -- pyomo +- scip!=9.2.0 # dependency of pyscipopt, temporary fix - matplotlib<=3.5.2 - reverse-geocode - country_converter - pyogrio - numba - py7zr +- tsam>=1.1.0 # Keep in conda environment when calling ipython - ipython @@ -80,11 +83,9 @@ dependencies: # Default solver for tests (required for CI) - glpk -- ipopt - gurobi - pip: - git+https://github.com/davide-f/google-drive-downloader@master # google drive with fix for virus scan - - tsam>=1.1.0 - chaospy # latest version only available on pip - fake_useragent diff --git a/envs/linux-pinned.yaml b/envs/linux-pinned.yaml index 9faa2139b..6607f427b 100644 --- a/envs/linux-pinned.yaml +++ b/envs/linux-pinned.yaml @@ -17,7 +17,7 @@ dependencies: - alsa-lib=1.2.13 - ampl-asl=1.0.0 - amply=0.1.6 -- anyio=4.7.0 +- anyio=4.8.0 - appdirs=1.4.4 - argon2-cffi=23.1.0 - argon2-cffi-bindings=21.2.0 @@ -27,8 +27,8 @@ dependencies: - atk-1.0=2.38.0 - atlite=0.3.0 - attr=2.5.1 -- attrs=24.3.0 -- aws-c-auth=0.8.0 +- attrs=25.1.0 +- aws-c-auth=0.8.1 - aws-c-cal=0.8.1 - aws-c-common=0.10.6 - aws-c-compression=0.3.0 - aws-c-event-stream=0.5.0 - aws-c-http=0.9.2 - aws-c-io=0.15.3 - aws-c-mqtt=0.11.0 -- aws-c-s3=0.7.7 -- aws-c-sdkutils=0.2.1 +- aws-c-s3=0.7.9 +- aws-c-sdkutils=0.2.2 - aws-checksums=0.2.2 -- aws-crt-cpp=0.29.7 -- aws-sdk-cpp=1.11.458 +- aws-crt-cpp=0.29.9 +- aws-sdk-cpp=1.11.489 - azure-core-cpp=1.14.0 - azure-identity-cpp=1.10.0 - azure-storage-blobs-cpp=12.13.0 - @@ -49,6 +49,7 @@ - babel=2.16.0 - beautifulsoup4=4.12.3 - bleach=6.2.0 +- bleach-with-css=6.2.0 - blosc=1.21.6 - bokeh=3.6.2 - bottleneck=1.4.2 - @@ -70,13 +71,13 @@ - certifi=2024.12.14 - cffi=1.17.1 - cfgv=3.3.1 -- cfitsio=4.4.1 +- cfitsio=4.5.0 - cftime=1.6.4 -- charset-normalizer=3.4.0 +- charset-normalizer=3.4.1 - click=8.1.8 - click-plugins=1.1.1 - cligj=0.7.2 -- cloudpickle=3.1.0 +- cloudpickle=3.1.1 - coin-or-cbc=2.10.12 - coin-or-cgl=0.60.9 - coin-or-clp=1.17.10 - @@ -92,36 +93,35 @@ - contourpy=1.3.1 - country_converter=1.2 - cpp-expected=1.1.0 +- cppad=20240000.7 - cycler=0.12.1 - cyrus-sasl=2.1.27 - cytoolz=1.0.1 -- dask=2024.12.1 -- dask-core=2024.12.1 -- dask-expr=1.1.21 -- datapi=0.1.1 +- dask=2025.1.0 +- dask-core=2025.1.0 +- datapi=0.1.2 - datashader=0.16.3 - datrie=0.8.2 - dbus=1.13.6 -- debugpy=1.8.11 +- debugpy=1.8.12 - decorator=5.1.1 - defusedxml=0.7.1 - deprecation=2.1.0 - descartes=1.1.0 - distlib=0.3.9 -- distributed=2024.12.1 +- distributed=2025.1.0 - docutils=0.21.2 - dpath=2.2.0 - earth-osm=2.3.post1 -- entrypoints=0.4 - entsoe-py=0.6.11 - et_xmlfile=2.0.0 - exceptiongroup=1.2.2 - executing=2.1.0 - expat=2.6.4 -- filelock=3.16.1 +- filelock=3.17.0 - fiona=1.10.1 - fmt=11.0.2 -- folium=0.19.2 +- folium=0.19.4 - 
font-ttf-dejavu-sans-mono=2.37 - font-ttf-inconsolata=3.000 - font-ttf-source-code-pro=2.038 @@ -129,7 +129,7 @@ dependencies: - fontconfig=2.15.0 - fonts-conda-ecosystem=1 - fonts-conda-forge=1 -- fonttools=4.55.3 +- fonttools=4.55.7 - fqdn=1.5.1 - freetype=2.12.1 - freexl=2.0.0 @@ -151,8 +151,8 @@ dependencies: - gettext-tools=0.22.5 - gflags=2.2.2 - giflib=5.2.2 -- gitdb=4.0.11 -- gitpython=3.1.43 +- gitdb=4.0.12 +- gitpython=3.1.44 - glib=2.82.2 - glib-tools=2.82.2 - glog=0.7.1 @@ -164,28 +164,29 @@ dependencies: - gstreamer=1.24.7 - gtk2=2.24.33 - gts=0.7.6 -- gurobi=12.0.0 +- gurobi=12.0.1 - h11=0.14.0 - h2=4.1.0 -- harfbuzz=10.1.0 +- harfbuzz=10.2.0 - hdf4=4.2.15 - hdf5=1.14.3 +- highspy=1.9.0 - holoviews=1.20.0 -- hpack=4.0.0 +- hpack=4.1.0 - httpcore=1.0.7 - httpx=0.28.1 - humanfriendly=10.0 - hvplot=0.11.2 -- hyperframe=6.0.1 +- hyperframe=6.1.0 - icu=75.1 -- identify=2.6.3 +- identify=2.6.6 - idna=3.10 -- importlib-metadata=8.5.0 -- importlib_metadata=8.5.0 -- importlib_resources=6.4.5 +- importlib-metadata=8.6.1 +- importlib_metadata=8.6.1 +- importlib_resources=6.5.2 - inflate64=1.0.1 - iniconfig=2.0.0 -- ipopt=3.14.17 +- ipopt=3.14.16 - ipykernel=6.29.5 - ipython=8.31.0 - isoduration=20.11.0 @@ -207,7 +208,7 @@ dependencies: - jupyterlab=4.3.4 - jupyterlab_pygments=0.3.0 - jupyterlab_server=2.27.3 -- kealib=1.6.0 +- kealib=1.6.1 - keyutils=1.6.1 - kiwisolver=1.4.7 - krb5=1.21.3 @@ -218,10 +219,10 @@ dependencies: - libabseil=20240722.0 - libaec=1.1.3 - libarchive=3.7.7 -- libarrow=18.1.0 -- libarrow-acero=18.1.0 -- libarrow-dataset=18.1.0 -- libarrow-substrait=18.1.0 +- libarrow=19.0.0 +- libarrow-acero=19.0.0 +- libarrow-dataset=19.0.0 +- libarrow-substrait=19.0.0 - libasprintf=0.22.5 - libasprintf-devel=0.22.5 - libblas=3.9.0 @@ -230,14 +231,14 @@ dependencies: - libbrotlienc=1.1.0 - libcap=2.71 - libcblas=3.9.0 -- libclang-cpp19.1=19.1.6 -- libclang13=19.1.6 +- libclang-cpp19.1=19.1.7 +- libclang13=19.1.7 - libcrc32c=1.1.2 - libcups=2.3.3 - libcurl=8.11.1 - libdeflate=1.23 - libdrm=2.4.124 -- libedit=3.1.20191231 +- libedit=3.1.20240808 - libegl=1.7.0 - libev=4.33 - libevent=2.1.12 @@ -272,8 +273,8 @@ dependencies: - libglvnd=1.7.0 - libglx=1.7.0 - libgomp=14.2.0 -- libgoogle-cloud=2.32.0 -- libgoogle-cloud-storage=2.32.0 +- libgoogle-cloud=2.34.0 +- libgoogle-cloud-storage=2.34.0 - libgpg-error=1.51 - libgrpc=1.67.1 - libhwloc=2.11.2 @@ -283,7 +284,7 @@ dependencies: - liblapack=3.9.0 - liblapacke=3.9.0 - libllvm14=14.0.6 -- libllvm19=19.1.6 +- libllvm19=19.1.7 - liblzma=5.6.3 - liblzma-devel=5.6.3 - libmamba=2.0.5 @@ -293,12 +294,14 @@ dependencies: - libntlm=1.8 - libogg=1.3.5 - libopenblas=0.3.28 +- libopentelemetry-cpp=1.18.0 +- libopentelemetry-cpp-headers=1.18.0 - libopus=1.3.1 -- libparquet=18.1.0 +- libparquet=19.0.0 - libpciaccess=0.18 -- libpng=1.6.44 +- libpng=1.6.46 - libpq=17.2 -- libprotobuf=5.28.2 +- libprotobuf=5.28.3 - libre2-11=2024.07.02 - librsvg=2.58.4 - librttopo=1.1.0 @@ -308,14 +311,14 @@ dependencies: - libsolv=0.7.30 - libspatialite=5.1.0 - libspral=2024.05.08 -- libsqlite=3.47.2 +- libsqlite=3.48.0 - libssh2=1.11.1 - libstdcxx=14.2.0 - libstdcxx-ng=14.2.0 -- libsystemd0=256.9 +- libsystemd0=257.2 - libthrift=0.21.0 - libtiff=4.7.0 -- libutf8proc=2.9.0 +- libutf8proc=2.10.0 - libuuid=2.38.1 - libvorbis=1.3.7 - libwebp-base=1.5.0 @@ -348,7 +351,8 @@ dependencies: - mercantile=1.2.1 - metis=5.1.0 - minizip=4.0.7 -- mistune=3.0.2 +- mistune=3.1.1 +- mpfr=4.2.1 - mpg123=1.32.9 - msgpack-python=1.1.0 - multipledispatch=0.6.0 @@ -360,7 +364,7 @@ 
dependencies: - mysql-common=9.0.1 - mysql-libs=9.0.1 - nbclient=0.10.2 -- nbconvert-core=7.16.4 +- nbconvert-core=7.16.6 - nbformat=5.10.4 - ncurses=6.5 - nest-asyncio=1.6.0 @@ -385,8 +389,8 @@ dependencies: - packaging=24.2 - pandas=2.2.2 - pandocfilters=1.5.0 -- panel=1.5.5 -- pango=1.54.0 +- panel=1.6.0 +- pango=1.56.1 - param=2.2.0 - parso=0.8.4 - partd=1.4.2 @@ -394,26 +398,27 @@ dependencies: - pcre2=10.44 - pexpect=4.9.0 - pickleshare=0.7.5 -- pillow=11.0.0 -- pip=24.3.1 +- pillow=11.1.0 +- pip=25.0 - pixman=0.44.2 - pkgutil-resolve-name=1.3.10 - plac=1.4.3 - platformdirs=4.3.6 - pluggy=1.5.0 - ply=3.11 -- polars=1.17.1 +- polars=1.21.0 - poppler=24.12.0 - poppler-data=0.4.12 - postgresql=17.2 -- powerplantmatching=0.6.0 -- pre-commit=4.0.1 +- powerplantmatching=0.7.0 +- pre-commit=4.1.0 - progressbar2=4.5.0 - proj=9.5.1 +- prometheus-cpp=1.3.0 - prometheus_client=0.21.1 -- prompt-toolkit=3.0.48 -- protobuf=5.28.2 -- psutil=6.1.0 +- prompt-toolkit=3.0.50 +- protobuf=5.28.3 +- psutil=6.1.1 - pthread-stubs=0.4 - ptyprocess=0.7.0 - pulp=2.7.0 @@ -421,23 +426,24 @@ dependencies: - pure_eval=0.2.3 - py-cpuinfo=9.0.0 - py7zr=0.22.0 -- pyarrow=18.1.0 -- pyarrow-core=18.1.0 +- pyarrow=19.0.0 +- pyarrow-core=19.0.0 - pybcj=1.0.3 - pycountry=24.6.1 - pycparser=2.22 - pycryptodomex=3.21.0 - pyct=0.5.0 - pydoe2=1.3.0 -- pygments=2.18.0 +- pygments=2.19.1 - pyogrio=0.10.0 -- pyomo=6.8.2 -- pyparsing=3.2.0 +- pyomo=6.6.1 +- pyparsing=3.2.1 - pyppmd=1.1.0 - pyproj=3.7.0 -- pypsa=0.24.0 +- pypsa=0.28.0 - pyqt=5.15.9 - pyqt5-sip=12.12.2 +- pyscipopt=5.2.1 - pyshp=2.3.1 - pysocks=1.7.1 - pytables=3.10.1 @@ -446,11 +452,11 @@ dependencies: - python-dateutil=2.9.0.post0 - python-fastjsonschema=2.21.1 - python-json-logger=2.0.7 -- python-tzdata=2024.2 +- python-tzdata=2025.1 - python-utils=3.9.1 - python_abi=3.10 - pytz=2024.2 -- pyviz_comms=3.0.3 +- pyviz_comms=3.0.4 - pyyaml=6.0.2 - pyzmq=26.2.0 - pyzstd=0.16.2 @@ -458,7 +464,7 @@ dependencies: - rasterio=1.3.11 - re2=2024.07.02 - readline=8.2 -- referencing=0.35.1 +- referencing=0.36.2 - reproc=14.2.5.post0 - reproc-cpp=14.2.5.post0 - requests=2.32.3 @@ -466,19 +472,20 @@ dependencies: - reverse-geocode=1.4.1 - rfc3339-validator=0.1.4 - rfc3986-validator=0.1.1 -- rioxarray=0.17.0 +- rioxarray=0.18.1 - rpds-py=0.22.3 - ruamel.yaml=0.17.26 - ruamel.yaml.clib=0.2.8 -- s2n=1.5.10 -- scikit-learn=1.6.0 -- scipy=1.14.1 +- s2n=1.5.11 +- scikit-learn=1.6.1 +- scip=9.1.1 +- scipy=1.15.1 - seaborn=0.13.2 - seaborn-base=0.13.2 - send2trash=1.8.3 -- setuptools=75.6.0 +- setuptools=75.8.0 - shapely=2.0.6 -- simdjson=3.11.3 +- simdjson=3.11.6 - sip=6.7.12 - six=1.17.0 - smart_open=7.1.0 @@ -489,12 +496,13 @@ dependencies: - snuggs=1.4.7 - sortedcontainers=2.4.0 - soupsieve=2.5 -- spdlog=1.14.1 -- sqlite=3.47.2 +- spdlog=1.15.0 +- sqlite=3.48.0 - stack_data=0.6.3 - statsmodels=0.14.4 - stopit=1.1.2 - tabulate=0.9.0 +- tbb=2022.0.0 - tblib=3.0.0 - terminado=0.18.1 - texttable=1.7.0 @@ -510,27 +518,28 @@ dependencies: - tornado=6.4.2 - tqdm=4.67.1 - traitlets=5.14.3 +- tsam=2.3.6 - types-python-dateutil=2.9.0.20241206 - typing-extensions=4.12.2 - typing_extensions=4.12.2 - typing_utils=0.1.0 -- tzcode=2024b -- tzdata=2024b +- tzcode=2025a +- tzdata=2025a - uc-micro-py=1.0.3 - ukkonen=1.0.1 -- unicodedata2=15.1.0 +- unicodedata2=16.0.0 - unidecode=1.3.8 - uri-template=1.3.0 - uriparser=0.9.8 - urllib3=2.3.0 - validators=0.34.0 -- virtualenv=20.28.0 +- virtualenv=20.29.1 - wcwidth=0.2.13 - webcolors=24.11.1 - webencodings=0.5.1 - websocket-client=1.8.0 - wheel=0.45.1 
-- wrapt=1.17.0 +- wrapt=1.17.2 - xarray=2023.11.0 - xcb-util=0.4.1 - xcb-util-image=0.4.0 @@ -550,25 +559,23 @@ dependencies: - xorg-libxfixes=6.0.1 - xorg-libxrender=0.9.12 - xorg-libxxf86vm=1.1.6 -- xyzservices=2024.9.0 +- xyzservices=2025.1.0 - xz=5.6.3 - xz-gpl-tools=5.6.3 - xz-tools=5.6.3 - yaml=0.2.5 - yaml-cpp=0.8.0 -- yte=1.5.5 +- yte=1.5.6 - zeromq=4.3.5 - zict=3.0.0 - zipfile-deflate64=0.2.0 - zipp=3.21.0 - zlib=1.3.1 -- zlib-ng=2.2.2 +- zlib-ng=2.2.3 - zstandard=0.23.0 - zstd=1.5.6 - pip: - chaospy==4.3.17 - fake-useragent==2.0.3 - googledrivedownloader==0.4 - - highspy==1.9.0 - - tsam==2.3.6 prefix: /usr/share/miniconda/envs/pypsa-earth diff --git a/envs/macos-pinned.yaml b/envs/macos-pinned.yaml index 7c053a54c..7e9149a18 100644 --- a/envs/macos-pinned.yaml +++ b/envs/macos-pinned.yaml @@ -14,7 +14,7 @@ dependencies: - affine=2.4.0 - ampl-asl=1.0.0 - amply=0.1.6 -- anyio=4.7.0 +- anyio=4.8.0 - appdirs=1.4.4 - appnope=0.1.4 - argon2-cffi=23.1.0 @@ -24,8 +24,8 @@ dependencies: - async-lru=2.0.4 - atk-1.0=2.38.0 - atlite=0.3.0 -- attrs=24.3.0 -- aws-c-auth=0.8.0 +- attrs=25.1.0 +- aws-c-auth=0.8.1 - aws-c-cal=0.8.1 - aws-c-common=0.10.6 - aws-c-compression=0.3.0 @@ -33,11 +33,11 @@ dependencies: - aws-c-http=0.9.2 - aws-c-io=0.15.3 - aws-c-mqtt=0.11.0 -- aws-c-s3=0.7.7 -- aws-c-sdkutils=0.2.1 +- aws-c-s3=0.7.9 +- aws-c-sdkutils=0.2.2 - aws-checksums=0.2.2 -- aws-crt-cpp=0.29.7 -- aws-sdk-cpp=1.11.458 +- aws-crt-cpp=0.29.9 +- aws-sdk-cpp=1.11.489 - azure-core-cpp=1.14.0 - azure-identity-cpp=1.10.0 - azure-storage-blobs-cpp=12.13.0 @@ -46,6 +46,7 @@ dependencies: - babel=2.16.0 - beautifulsoup4=4.12.3 - bleach=6.2.0 +- bleach-with-css=6.2.0 - blosc=1.21.6 - bokeh=3.6.2 - bottleneck=1.4.2 @@ -67,13 +68,13 @@ dependencies: - certifi=2024.12.14 - cffi=1.17.1 - cfgv=3.3.1 -- cfitsio=4.4.1 +- cfitsio=4.5.0 - cftime=1.6.4 -- charset-normalizer=3.4.0 +- charset-normalizer=3.4.1 - click=8.1.8 - click-plugins=1.1.1 - cligj=0.7.2 -- cloudpickle=3.1.0 +- cloudpickle=3.1.1 - coin-or-cbc=2.10.12 - coin-or-cgl=0.60.9 - coin-or-clp=1.17.10 @@ -89,34 +90,33 @@ dependencies: - contourpy=1.3.1 - country_converter=1.2 - cpp-expected=1.1.0 +- cppad=20240000.7 - cycler=0.12.1 - cyrus-sasl=2.1.27 - cytoolz=1.0.1 -- dask=2024.12.1 -- dask-core=2024.12.1 -- dask-expr=1.1.21 -- datapi=0.1.1 +- dask=2025.1.0 +- dask-core=2025.1.0 +- datapi=0.1.2 - datashader=0.16.3 - datrie=0.8.2 -- debugpy=1.8.11 +- debugpy=1.8.12 - decorator=5.1.1 - defusedxml=0.7.1 - deprecation=2.1.0 - descartes=1.1.0 - distlib=0.3.9 -- distributed=2024.12.1 +- distributed=2025.1.0 - docutils=0.21.2 - dpath=2.2.0 - earth-osm=2.3.post1 -- entrypoints=0.4 - entsoe-py=0.6.11 - et_xmlfile=2.0.0 - exceptiongroup=1.2.2 - executing=2.1.0 -- filelock=3.16.1 +- filelock=3.17.0 - fiona=1.10.1 - fmt=11.0.2 -- folium=0.19.2 +- folium=0.19.4 - font-ttf-dejavu-sans-mono=2.37 - font-ttf-inconsolata=3.000 - font-ttf-source-code-pro=2.038 @@ -124,7 +124,7 @@ dependencies: - fontconfig=2.15.0 - fonts-conda-ecosystem=1 - fonts-conda-forge=1 -- fonttools=4.55.3 +- fonttools=4.55.7 - fqdn=1.5.1 - freetype=2.12.1 - freexl=2.0.0 @@ -144,8 +144,8 @@ dependencies: - geoviews-core=1.14.0 - gflags=2.2.2 - giflib=5.2.2 -- gitdb=4.0.11 -- gitpython=3.1.43 +- gitdb=4.0.12 +- gitpython=3.1.44 - glog=0.7.1 - glpk=5.0 - gmp=6.3.0 @@ -153,28 +153,29 @@ dependencies: - graphviz=12.0.0 - gtk2=2.24.33 - gts=0.7.6 -- gurobi=12.0.0 +- gurobi=12.0.1 - h11=0.14.0 - h2=4.1.0 -- harfbuzz=10.1.0 +- harfbuzz=10.2.0 - hdf4=4.2.15 - hdf5=1.14.3 +- highspy=1.9.0 - holoviews=1.20.0 
-- hpack=4.0.0 +- hpack=4.1.0 - httpcore=1.0.7 - httpx=0.28.1 - humanfriendly=10.0 - hvplot=0.11.2 -- hyperframe=6.0.1 +- hyperframe=6.1.0 - icu=75.1 -- identify=2.6.3 +- identify=2.6.6 - idna=3.10 -- importlib-metadata=8.5.0 -- importlib_metadata=8.5.0 -- importlib_resources=6.4.5 +- importlib-metadata=8.6.1 +- importlib_metadata=8.6.1 +- importlib_resources=6.5.2 - inflate64=1.0.1 - iniconfig=2.0.0 -- ipopt=3.14.17 +- ipopt=3.14.16 - ipykernel=6.29.5 - ipython=8.31.0 - isoduration=20.11.0 @@ -196,7 +197,7 @@ dependencies: - jupyterlab=4.3.4 - jupyterlab_pygments=0.3.0 - jupyterlab_server=2.27.3 -- kealib=1.6.0 +- kealib=1.6.1 - kiwisolver=1.4.7 - krb5=1.21.3 - lcms2=2.16 @@ -204,10 +205,10 @@ dependencies: - libabseil=20240722.0 - libaec=1.1.3 - libarchive=3.7.7 -- libarrow=18.1.0 -- libarrow-acero=18.1.0 -- libarrow-dataset=18.1.0 -- libarrow-substrait=18.1.0 +- libarrow=19.0.0 +- libarrow-acero=19.0.0 +- libarrow-dataset=19.0.0 +- libarrow-substrait=19.0.0 - libblas=3.9.0 - libbrotlicommon=1.1.0 - libbrotlidec=1.1.0 @@ -215,9 +216,9 @@ dependencies: - libcblas=3.9.0 - libcrc32c=1.1.2 - libcurl=8.11.1 -- libcxx=19.1.6 +- libcxx=19.1.7 - libdeflate=1.23 -- libedit=3.1.20191231 +- libedit=3.1.20240808 - libev=4.33 - libevent=2.1.12 - libexpat=2.6.4 @@ -240,9 +241,10 @@ dependencies: - libgfortran=5.0.0 - libgfortran5=13.2.0 - libglib=2.82.2 -- libgoogle-cloud=2.32.0 -- libgoogle-cloud-storage=2.32.0 +- libgoogle-cloud=2.34.0 +- libgoogle-cloud-storage=2.34.0 - libgrpc=1.67.1 +- libhwloc=2.11.2 - libiconv=1.17 - libintl=0.22.5 - libjpeg-turbo=3.0.0 @@ -256,22 +258,24 @@ dependencies: - libnghttp2=1.64.0 - libntlm=1.8 - libopenblas=0.3.28 -- libparquet=18.1.0 -- libpng=1.6.44 +- libopentelemetry-cpp=1.18.0 +- libopentelemetry-cpp-headers=1.18.0 +- libparquet=19.0.0 +- libpng=1.6.46 - libpq=17.2 -- libprotobuf=5.28.2 +- libprotobuf=5.28.3 - libre2-11=2024.07.02 - librsvg=2.58.4 - librttopo=1.1.0 -- libscotch=7.0.5 +- libscotch=7.0.6 - libsodium=1.0.20 - libsolv=0.7.30 - libspatialite=5.1.0 -- libsqlite=3.47.2 +- libsqlite=3.48.0 - libssh2=1.11.1 - libthrift=0.21.0 - libtiff=4.7.0 -- libutf8proc=2.9.0 +- libutf8proc=2.10.0 - libwebp-base=1.5.0 - libxcb=1.17.0 - libxml2=2.13.5 @@ -280,7 +284,7 @@ dependencies: - libzlib=1.3.1 - linkify-it-py=2.0.3 - linopy=0.3.11 -- llvm-openmp=19.1.6 +- llvm-openmp=19.1.7 - llvmlite=0.43.0 - locket=1.0.0 - lxml=5.3.0 @@ -301,7 +305,8 @@ dependencies: - mercantile=1.2.1 - metis=5.1.0 - minizip=4.0.7 -- mistune=3.0.2 +- mistune=3.1.1 +- mpfr=4.2.1 - msgpack-python=1.1.0 - multipledispatch=0.6.0 - multiurl=0.3.3 @@ -310,7 +315,7 @@ dependencies: - mumps-seq=5.7.3 - munkres=1.1.4 - nbclient=0.10.2 -- nbconvert-core=7.16.4 +- nbconvert-core=7.16.6 - nbformat=5.10.4 - ncurses=6.5 - nest-asyncio=1.6.0 @@ -334,8 +339,8 @@ dependencies: - packaging=24.2 - pandas=2.2.2 - pandocfilters=1.5.0 -- panel=1.5.5 -- pango=1.54.0 +- panel=1.6.0 +- pango=1.56.1 - param=2.2.0 - parso=0.8.4 - partd=1.4.2 @@ -343,49 +348,51 @@ dependencies: - pcre2=10.44 - pexpect=4.9.0 - pickleshare=0.7.5 -- pillow=11.0.0 -- pip=24.3.1 +- pillow=11.1.0 +- pip=25.0 - pixman=0.44.2 - pkgutil-resolve-name=1.3.10 - plac=1.4.3 - platformdirs=4.3.6 - pluggy=1.5.0 - ply=3.11 -- polars=1.17.1 +- polars=1.21.0 - poppler=24.12.0 - poppler-data=0.4.12 - postgresql=17.2 -- powerplantmatching=0.6.0 -- pre-commit=4.0.1 +- powerplantmatching=0.7.0 +- pre-commit=4.1.0 - progressbar2=4.5.0 - proj=9.5.1 +- prometheus-cpp=1.3.0 - prometheus_client=0.21.1 -- prompt-toolkit=3.0.48 -- protobuf=5.28.2 -- psutil=6.1.0 +- 
prompt-toolkit=3.0.50 +- protobuf=5.28.3 +- psutil=6.1.1 - pthread-stubs=0.4 - ptyprocess=0.7.0 - pulp=2.7.0 - pure_eval=0.2.3 - py-cpuinfo=9.0.0 - py7zr=0.22.0 -- pyarrow=18.1.0 -- pyarrow-core=18.1.0 +- pyarrow=19.0.0 +- pyarrow-core=19.0.0 - pybcj=1.0.3 - pycountry=24.6.1 - pycparser=2.22 - pycryptodomex=3.21.0 - pyct=0.5.0 - pydoe2=1.3.0 -- pygments=2.18.0 -- pyobjc-core=10.3.2 -- pyobjc-framework-cocoa=10.3.2 +- pygments=2.19.1 +- pyobjc-core=11.0 +- pyobjc-framework-cocoa=11.0 - pyogrio=0.10.0 -- pyomo=6.8.2 -- pyparsing=3.2.0 +- pyomo=6.6.1 +- pyparsing=3.2.1 - pyppmd=1.1.0 - pyproj=3.7.0 -- pypsa=0.24.0 +- pypsa=0.28.0 +- pyscipopt=5.2.1 - pyshp=2.3.1 - pysocks=1.7.1 - pytables=3.10.1 @@ -394,18 +401,18 @@ dependencies: - python-dateutil=2.9.0.post0 - python-fastjsonschema=2.21.1 - python-json-logger=2.0.7 -- python-tzdata=2024.2 +- python-tzdata=2025.1 - python-utils=3.9.1 - python_abi=3.10 - pytz=2024.2 -- pyviz_comms=3.0.3 +- pyviz_comms=3.0.4 - pyyaml=6.0.2 - pyzmq=26.2.0 - pyzstd=0.16.2 - rasterio=1.3.11 - re2=2024.07.02 - readline=8.2 -- referencing=0.35.1 +- referencing=0.36.2 - reproc=14.2.5.post0 - reproc-cpp=14.2.5.post0 - requests=2.32.3 @@ -413,18 +420,19 @@ dependencies: - reverse-geocode=1.4.1 - rfc3339-validator=0.1.4 - rfc3986-validator=0.1.1 -- rioxarray=0.17.0 +- rioxarray=0.18.1 - rpds-py=0.22.3 - ruamel.yaml=0.17.26 - ruamel.yaml.clib=0.2.8 -- scikit-learn=1.6.0 -- scipy=1.14.1 +- scikit-learn=1.6.1 +- scip=9.1.1 +- scipy=1.15.1 - seaborn=0.13.2 - seaborn-base=0.13.2 - send2trash=1.8.3 -- setuptools=75.6.0 +- setuptools=75.8.0 - shapely=2.0.6 -- simdjson=3.11.3 +- simdjson=3.11.6 - six=1.17.0 - smart_open=7.1.0 - smmap=5.0.0 @@ -434,12 +442,13 @@ dependencies: - snuggs=1.4.7 - sortedcontainers=2.4.0 - soupsieve=2.5 -- spdlog=1.14.1 -- sqlite=3.47.2 +- spdlog=1.15.0 +- sqlite=3.48.0 - stack_data=0.6.3 - statsmodels=0.14.4 - stopit=1.1.2 - tabulate=0.9.0 +- tbb=2022.0.0 - tblib=3.0.0 - terminado=0.18.1 - texttable=1.7.0 @@ -454,48 +463,47 @@ dependencies: - tornado=6.4.2 - tqdm=4.67.1 - traitlets=5.14.3 +- tsam=2.3.6 - types-python-dateutil=2.9.0.20241206 - typing-extensions=4.12.2 - typing_extensions=4.12.2 - typing_utils=0.1.0 -- tzcode=2024b -- tzdata=2024b +- tzcode=2025a +- tzdata=2025a - uc-micro-py=1.0.3 - ukkonen=1.0.1 -- unicodedata2=15.1.0 +- unicodedata2=16.0.0 - unidecode=1.3.8 - uri-template=1.3.0 - uriparser=0.9.8 - urllib3=2.3.0 - validators=0.34.0 -- virtualenv=20.28.0 +- virtualenv=20.29.1 - wcwidth=0.2.13 - webcolors=24.11.1 - webencodings=0.5.1 - websocket-client=1.8.0 - wheel=0.45.1 -- wrapt=1.17.0 +- wrapt=1.17.2 - xarray=2023.11.0 - xerces-c=3.2.5 - xlrd=2.0.1 - xorg-libxau=1.0.12 - xorg-libxdmcp=1.1.5 -- xyzservices=2024.9.0 +- xyzservices=2025.1.0 - yaml=0.2.5 - yaml-cpp=0.8.0 -- yte=1.5.5 +- yte=1.5.6 - zeromq=4.3.5 - zict=3.0.0 - zipfile-deflate64=0.2.0 - zipp=3.21.0 - zlib=1.3.1 -- zlib-ng=2.2.2 +- zlib-ng=2.2.3 - zstandard=0.23.0 - zstd=1.5.6 - pip: - chaospy==4.3.17 - fake-useragent==2.0.3 - googledrivedownloader==0.4 - - highspy==1.9.0 - - tsam==2.3.6 prefix: /Users/runner/miniconda3/envs/pypsa-earth diff --git a/envs/windows-pinned.yaml b/envs/windows-pinned.yaml index 9d14aa2a7..129d21fc9 100644 --- a/envs/windows-pinned.yaml +++ b/envs/windows-pinned.yaml @@ -14,9 +14,8 @@ channels: dependencies: - _openmp_mutex=4.5 - affine=2.4.0 -- ampl-asl=1.0.0 - amply=0.1.6 -- anyio=4.7.0 +- anyio=4.8.0 - appdirs=1.4.4 - argon2-cffi=23.1.0 - argon2-cffi-bindings=21.2.0 @@ -24,8 +23,8 @@ dependencies: - asttokens=3.0.0 - async-lru=2.0.4 - 
 - atlite=0.3.0
-- attrs=24.3.0
-- aws-c-auth=0.8.0
+- attrs=25.1.0
+- aws-c-auth=0.8.1
 - aws-c-cal=0.8.1
 - aws-c-common=0.10.6
 - aws-c-compression=0.3.0
@@ -33,11 +32,11 @@
 - aws-c-http=0.9.2
 - aws-c-io=0.15.3
 - aws-c-mqtt=0.11.0
-- aws-c-s3=0.7.7
-- aws-c-sdkutils=0.2.1
+- aws-c-s3=0.7.9
+- aws-c-sdkutils=0.2.2
 - aws-checksums=0.2.2
-- aws-crt-cpp=0.29.7
-- aws-sdk-cpp=1.11.458
+- aws-crt-cpp=0.29.9
+- aws-sdk-cpp=1.11.489
 - azure-core-cpp=1.14.0
 - azure-identity-cpp=1.10.0
 - azure-storage-blobs-cpp=12.13.0
@@ -45,6 +44,7 @@
 - babel=2.16.0
 - beautifulsoup4=4.12.3
 - bleach=6.2.0
+- bleach-with-css=6.2.0
 - blosc=1.21.6
 - bokeh=3.6.2
 - bottleneck=1.4.2
@@ -66,13 +66,13 @@
 - certifi=2024.12.14
 - cffi=1.17.1
 - cfgv=3.3.1
-- cfitsio=4.4.1
+- cfitsio=4.5.0
 - cftime=1.6.4
-- charset-normalizer=3.4.0
+- charset-normalizer=3.4.1
 - click=8.1.8
 - click-plugins=1.1.1
 - cligj=0.7.2
-- cloudpickle=3.1.0
+- cloudpickle=3.1.1
 - colorama=0.4.6
 - colorcet=3.1.0
 - comm=0.2.2
@@ -82,34 +82,33 @@
 - contourpy=1.3.1
 - country_converter=1.2
 - cpp-expected=1.1.0
+- cppad=20240000.7
 - cpython=3.10.16
 - cycler=0.12.1
 - cytoolz=1.0.1
-- dask=2024.12.1
-- dask-core=2024.12.1
-- dask-expr=1.1.21
-- datapi=0.1.1
+- dask=2025.1.0
+- dask-core=2025.1.0
+- datapi=0.1.2
 - datashader=0.16.3
 - datrie=0.8.2
-- debugpy=1.8.11
+- debugpy=1.8.12
 - decorator=5.1.1
 - defusedxml=0.7.1
 - deprecation=2.1.0
 - descartes=1.1.0
 - distlib=0.3.9
-- distributed=2024.12.1
+- distributed=2025.1.0
 - docutils=0.21.2
 - dpath=2.2.0
 - earth-osm=2.3.post1
-- entrypoints=0.4
 - entsoe-py=0.6.11
 - et_xmlfile=2.0.0
 - exceptiongroup=1.2.2
 - executing=2.1.0
-- filelock=3.16.1
+- filelock=3.17.0
 - fiona=1.10.1
 - fmt=11.0.2
-- folium=0.19.2
+- folium=0.19.4
 - font-ttf-dejavu-sans-mono=2.37
 - font-ttf-inconsolata=3.000
 - font-ttf-source-code-pro=2.038
@@ -117,7 +116,7 @@
 - fontconfig=2.15.0
 - fonts-conda-ecosystem=1
 - fonts-conda-forge=1
-- fonttools=4.55.3
+- fonttools=4.55.7
 - fqdn=1.5.1
 - freetype=2.12.1
 - freexl=2.0.0
@@ -135,38 +134,40 @@
 - geoviews=1.14.0
 - geoviews-core=1.14.0
 - getopt-win32=0.1
-- gitdb=4.0.11
-- gitpython=3.1.43
+- gitdb=4.0.12
+- gitpython=3.1.44
 - glib=2.82.2
 - glib-tools=2.82.2
 - glpk=5.0
+- gmp=6.3.0
 - graphite2=1.3.13
 - graphviz=12.0.0
 - gst-plugins-base=1.24.7
 - gstreamer=1.24.7
 - gts=0.7.6
-- gurobi=12.0.0
+- gurobi=12.0.1
 - h11=0.14.0
 - h2=4.1.0
-- harfbuzz=10.1.0
+- harfbuzz=10.2.0
 - hdf4=4.2.15
 - hdf5=1.14.3
+- highspy=1.9.0
 - holoviews=1.20.0
-- hpack=4.0.0
+- hpack=4.1.0
 - httpcore=1.0.7
 - httpx=0.28.1
 - humanfriendly=10.0
 - hvplot=0.11.2
-- hyperframe=6.0.1
+- hyperframe=6.1.0
 - icu=75.1
-- identify=2.6.3
+- identify=2.6.6
 - idna=3.10
-- importlib-metadata=8.5.0
-- importlib_metadata=8.5.0
-- importlib_resources=6.4.5
+- importlib-metadata=8.6.1
+- importlib_metadata=8.6.1
+- importlib_resources=6.5.2
 - inflate64=1.0.1
 - iniconfig=2.0.0
-- ipopt=3.14.17
+- ipopt=3.14.16
 - ipykernel=6.29.5
 - ipython=8.31.0
 - isoduration=20.11.0
@@ -187,7 +188,7 @@
 - jupyterlab=4.3.4
 - jupyterlab_pygments=0.3.0
 - jupyterlab_server=2.27.3
-- kealib=1.6.0
+- kealib=1.6.1
 - kiwisolver=1.4.7
 - krb5=1.21.3
 - lcms2=2.16
@@ -195,16 +196,17 @@
 - libabseil=20240722.0
 - libaec=1.1.3
 - libarchive=3.7.7
-- libarrow=18.1.0
-- libarrow-acero=18.1.0
-- libarrow-dataset=18.1.0
-- libarrow-substrait=18.1.0
+- libarrow=19.0.0
+- libarrow-acero=19.0.0
+- libarrow-dataset=19.0.0
+- libarrow-substrait=19.0.0
 - libblas=3.9.0
+- libboost=1.86.0
 - libbrotlicommon=1.1.0
 - libbrotlidec=1.1.0
 - libbrotlienc=1.1.0
 - libcblas=3.9.0
-- libclang13=19.1.6
+- libclang13=19.1.7
 - libcrc32c=1.1.2
 - libcurl=8.11.1
 - libdeflate=1.23
@@ -230,9 +232,10 @@
 - libgdal-xls=3.9.3
 - libglib=2.82.2
 - libgomp=14.2.0
-- libgoogle-cloud=2.32.0
-- libgoogle-cloud-storage=2.32.0
+- libgoogle-cloud=2.34.0
+- libgoogle-cloud-storage=2.34.0
 - libgrpc=1.67.1
+- libhwloc=2.11.2
 - libiconv=1.17
 - libintl=0.22.5
 - libintl-devel=0.22.5
@@ -244,20 +247,20 @@
 - libnetcdf=4.9.2
 - libogg=1.3.5
 - libopenblas=0.3.28
-- libparquet=18.1.0
-- libpng=1.6.44
+- libparquet=19.0.0
+- libpng=1.6.46
 - libpq=17.2
-- libprotobuf=5.28.2
+- libprotobuf=5.28.3
 - libre2-11=2024.07.02
 - librttopo=1.1.0
 - libsodium=1.0.20
 - libsolv=0.7.30
 - libspatialite=5.1.0
-- libsqlite=3.47.2
+- libsqlite=3.48.0
 - libssh2=1.11.1
 - libthrift=0.21.0
 - libtiff=4.7.0
-- libutf8proc=2.9.0
+- libutf8proc=2.10.0
 - libvorbis=1.3.7
 - libwebp-base=1.5.0
 - libwinpthread=12.0.0.r4.gg4f2fc60ca
@@ -288,7 +291,8 @@
 - memory_profiler=0.61.0
 - mercantile=1.2.1
 - minizip=4.0.7
-- mistune=3.0.2
+- mistune=3.1.1
+- mpfr=4.2.1
 - msgpack-python=1.1.0
 - multipledispatch=0.6.0
 - multiurl=0.3.3
@@ -296,7 +300,7 @@
 - mumps-seq=5.7.3
 - munkres=1.1.4
 - nbclient=0.10.2
-- nbconvert-core=7.16.4
+- nbconvert-core=7.16.6
 - nbformat=5.10.4
 - nest-asyncio=1.6.0
 - netcdf4=1.7.2
@@ -318,16 +322,16 @@
 - packaging=24.2
 - pandas=2.2.2
 - pandocfilters=1.5.0
-- panel=1.5.5
-- pango=1.54.0
+- panel=1.6.0
+- pango=1.56.1
 - param=2.2.0
 - parso=0.8.4
 - partd=1.4.2
 - patsy=1.0.1
 - pcre2=10.44
 - pickleshare=0.7.5
-- pillow=11.0.0
-- pip=24.3.1
+- pillow=11.1.0
+- pip=25.0
 - pixman=0.44.2
 - pkgutil-resolve-name=1.3.10
 - plac=1.4.3
@@ -337,37 +341,38 @@
 - poppler=24.12.0
 - poppler-data=0.4.12
 - postgresql=17.2
-- powerplantmatching=0.6.0
-- pre-commit=4.0.1
+- powerplantmatching=0.7.0
+- pre-commit=4.1.0
 - progressbar2=4.5.0
 - proj=9.5.1
 - prometheus_client=0.21.1
-- prompt-toolkit=3.0.48
-- protobuf=5.28.2
-- psutil=6.1.0
+- prompt-toolkit=3.0.50
+- protobuf=5.28.3
+- psutil=6.1.1
 - pthread-stubs=0.4
 - pulp=2.7.0
 - pure_eval=0.2.3
 - py-cpuinfo=9.0.0
 - py7zr=0.22.0
-- pyarrow=18.1.0
-- pyarrow-core=18.1.0
+- pyarrow=19.0.0
+- pyarrow-core=19.0.0
 - pybcj=1.0.3
 - pycountry=24.6.1
 - pycparser=2.22
 - pycryptodomex=3.21.0
 - pyct=0.5.0
 - pydoe2=1.3.0
-- pygments=2.18.0
+- pygments=2.19.1
 - pyogrio=0.10.0
-- pyomo=6.8.2
-- pyparsing=3.2.0
+- pyomo=6.6.1
+- pyparsing=3.2.1
 - pyppmd=1.1.0
 - pyproj=3.7.0
-- pypsa=0.24.0
+- pypsa=0.28.0
 - pyqt=5.15.9
 - pyqt5-sip=12.12.2
 - pyreadline3=3.5.4
+- pyscipopt=5.2.1
 - pyshp=2.3.1
 - pysocks=1.7.1
 - pytables=3.10.1
@@ -376,11 +381,11 @@
 - python-dateutil=2.9.0.post0
 - python-fastjsonschema=2.21.1
 - python-json-logger=2.0.7
-- python-tzdata=2024.2
+- python-tzdata=2025.1
 - python-utils=3.9.1
 - python_abi=3.10
 - pytz=2024.2
-- pyviz_comms=3.0.3
+- pyviz_comms=3.0.4
 - pywin32=307
 - pywinpty=2.0.14
 - pyyaml=6.0.2
@@ -389,7 +394,7 @@
 - qt-main=5.15.15
 - rasterio=1.3.11
 - re2=2024.07.02
-- referencing=0.35.1
+- referencing=0.36.2
 - reproc=14.2.5.post0
 - reproc-cpp=14.2.5.post0
 - requests=2.32.3
@@ -397,18 +402,19 @@
 - reverse-geocode=1.4.1
 - rfc3339-validator=0.1.4
 - rfc3986-validator=0.1.1
-- rioxarray=0.17.0
+- rioxarray=0.18.1
 - rpds-py=0.22.3
 - ruamel.yaml=0.17.26
 - ruamel.yaml.clib=0.2.8
-- scikit-learn=1.6.0
-- scipy=1.14.1
+- scikit-learn=1.6.1
+- scip=9.1.1
+- scipy=1.15.1
 - seaborn=0.13.2
 - seaborn-base=0.13.2
 - send2trash=1.8.3
-- setuptools=75.6.0
+- setuptools=75.8.0
 - shapely=2.0.6
-- simdjson=3.11.3
+- simdjson=3.11.6
 - sip=6.7.12
 - six=1.17.0
 - smart_open=7.1.0
@@ -419,12 +425,13 @@
 - snuggs=1.4.7
 - sortedcontainers=2.4.0
 - soupsieve=2.5
-- spdlog=1.14.1
-- sqlite=3.47.2
+- spdlog=1.15.0
+- sqlite=3.48.0
 - stack_data=0.6.3
 - statsmodels=0.14.4
 - stopit=1.1.2
 - tabulate=0.9.0
+- tbb=2022.0.0
 - tblib=3.0.0
 - terminado=0.18.1
 - texttable=1.7.0
@@ -440,15 +447,16 @@
 - tornado=6.4.2
 - tqdm=4.67.1
 - traitlets=5.14.3
+- tsam=2.3.6
 - types-python-dateutil=2.9.0.20241206
 - typing-extensions=4.12.2
 - typing_extensions=4.12.2
 - typing_utils=0.1.0
-- tzdata=2024b
+- tzdata=2025a
 - uc-micro-py=1.0.3
 - ucrt=10.0.22621.0
 - ukkonen=1.0.1
-- unicodedata2=15.1.0
+- unicodedata2=16.0.0
 - unidecode=1.3.8
 - uri-template=1.3.0
 - uriparser=0.9.8
@@ -456,7 +464,7 @@
 - validators=0.34.0
 - vc=14.3
 - vc14_runtime=14.42.34433
-- virtualenv=20.28.0
+- virtualenv=20.29.1
 - vs2015_runtime=14.42.34433
 - wcwidth=0.2.13
 - webcolors=24.11.1
@@ -465,7 +473,7 @@
 - wheel=0.45.1
 - win_inet_pton=1.1.0
 - winpty=0.4.3
-- wrapt=1.17.0
+- wrapt=1.17.2
 - xarray=2023.11.0
 - xerces-c=3.2.5
 - xlrd=2.0.1
@@ -477,23 +485,21 @@
 - xorg-libxext=1.3.6
 - xorg-libxpm=3.5.17
 - xorg-libxt=1.3.1
-- xyzservices=2024.9.0
+- xyzservices=2025.1.0
 - yaml=0.2.5
 - yaml-cpp=0.8.0
-- yte=1.5.5
+- yte=1.5.6
 - zeromq=4.3.5
 - zict=3.0.0
 - zipfile-deflate64=0.2.0
 - zipp=3.21.0
 - zlib=1.3.1
-- zlib-ng=2.2.2
+- zlib-ng=2.2.3
 - zstandard=0.23.0
 - zstd=1.5.6
 - pip:
   - chaospy==4.3.17
   - fake-useragent==2.0.3
   - googledrivedownloader==0.4
-  - highspy==1.9.0
-  - polars==1.17.1
-  - tsam==2.3.6
+  - polars==1.21.0
 prefix: C:\Miniconda\envs\pypsa-earth
diff --git a/scripts/_helpers.py b/scripts/_helpers.py
index 8b8135063..33d5c7d78 100644
--- a/scripts/_helpers.py
+++ b/scripts/_helpers.py
@@ -922,6 +922,16 @@ def get_last_commit_message(path):
     return last_commit_message
 
 
+def update_config_dictionary(
+    config_dict,
+    parameter_key_to_fill="lines",
+    dict_to_use={"geometry": "first", "bounds": "first"},
+):
+    config_dict.setdefault(parameter_key_to_fill, {})
+    config_dict[parameter_key_to_fill].update(dict_to_use)
+    return config_dict
+
+
 # PYPSA-EARTH-SEC
 def annuity(n, r):
     """
@@ -1274,7 +1284,7 @@ def get_GADM_layer(country_list, layer_id, update=False, outlogging=False):
 def locate_bus(
     coords,
     co,
-    gadm_level,
+    gadm_layer_id,
     path_to_gadm=None,
     gadm_clustering=False,
     col="name",
@@ -1306,8 +1316,8 @@
             lambda name: three_2_two_digits_country(name[:3]) + name[3:]
         )
     else:
-        gdf = get_GADM_layer(co, gadm_level)
-        col = "GID_{}".format(gadm_level)
+        gdf = get_GADM_layer(co, gadm_layer_id)
+        col = "GID_{}".format(gadm_layer_id)
 
     # gdf.set_index("GADM_ID", inplace=True)
     gdf_co = gdf[
@@ -1484,3 +1494,91 @@ def safe_divide(numerator, denominator, default_value=np.nan):
             f"Division by zero: {numerator} / {denominator}, returning NaN."
         )
         return np.nan
+
+
+def lossy_bidirectional_links(n, carrier):
+    """
+    Split bidirectional links of type carrier into two unidirectional links to include transmission losses.
+ """ + + # identify all links of type carrier + carrier_i = n.links.query("carrier == @carrier").index + + if carrier_i.empty: + return + + logger.info(f"Splitting bidirectional links with the carrier {carrier}") + + # set original links to be unidirectional + n.links.loc[carrier_i, "p_min_pu"] = 0 + + # add a new links that mirror the original links, but represent the reversed flow direction + # the new links have a cost and length of 0 to not distort the overall cost and network length + rev_links = ( + n.links.loc[carrier_i].copy().rename({"bus0": "bus1", "bus1": "bus0"}, axis=1) + ) + rev_links["length_original"] = rev_links[ + "length" + ] # tracker for the length of the original links length + rev_links["capital_cost"] = 0 + rev_links["length"] = 0 + rev_links["reversed"] = True # tracker for easy identification of reversed links + rev_links.index = rev_links.index.map(lambda x: x + "-reversed") + + # add the new reversed links to the network and fill the newly created trackers with default values for the other links + n.links = pd.concat([n.links, rev_links], sort=False) + n.links["reversed"] = n.links["reversed"].fillna(False).infer_objects(copy=False) + n.links["length_original"] = n.links["length_original"].fillna(n.links.length) + + +def set_length_based_efficiency(n, carrier, bus_suffix, transmission_efficiency): + """ + Set the efficiency of all links of type carrier in network n based on their length and the values specified in the config. + Additionally add the length based electricity demand required for compression (if applicable). + The bus_suffix refers to the suffix that differentiates the links bus0 from the corresponding electricity bus, i.e. " H2". + Important: + Call this function AFTER lossy_bidirectional_links when creating links that are both bidirectional and lossy, + and have a length based electricity demand for compression. Otherwise the compression will not consistently take place at + the inflow bus and instead vary between the inflow and the outflow bus. + """ + + # get the links length based efficiency and required compression + if carrier not in transmission_efficiency: + raise KeyError( + f"An error occurred when setting the length based efficiency for the Links of type {carrier}." + f"The Link type {carrier} was not found in the config under config['sector']['transmission_efficiency']." 
+ ) + efficiencies = transmission_efficiency[carrier] + efficiency_static = efficiencies.get("efficiency_static", 1) + efficiency_per_1000km = efficiencies.get("efficiency_per_1000km", 1) + compression_per_1000km = efficiencies.get("compression_per_1000km", 0) + + # indetify all links of type carrier + carrier_i = n.links.loc[n.links.carrier == carrier].index + + # identify the lengths of all links of type carrier + # use "length_original" for lossy bidirectional links and "length" for any other link + if ("reversed" in n.links.columns) and any(n.links.loc[carrier_i, "reversed"]): + lengths = n.links.loc[carrier_i, "length_original"] + else: + lengths = n.links.loc[carrier_i, "length"] + + # set the links' length based efficiency + n.links.loc[carrier_i, "efficiency"] = ( + efficiency_static * efficiency_per_1000km ** (lengths / 1e3) + ) + + # set the links's electricity demand for compression + if compression_per_1000km > 0: + # connect the links to their corresponding electricity buses + n.links.loc[carrier_i, "bus2"] = n.links.loc[ + carrier_i, "bus0" + ].str.removesuffix(bus_suffix) + # TODO: use these lines to set bus 2 instead, once n.buses.location is functional and remove bus_suffix. + """ + n.links.loc[carrier_i, "bus2"] = n.links.loc[carrier_i, "bus0"].map( + n.buses.location + ) # electricity + """ + # set the required compression demand + n.links.loc[carrier_i, "efficiency2"] = -compression_per_1000km * lengths / 1e3 diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py index 75ae9ce42..84232f4d8 100755 --- a/scripts/add_electricity.py +++ b/scripts/add_electricity.py @@ -488,7 +488,10 @@ def attach_hydro(n, costs, ppl): ror = ppl.query('technology == "Run-Of-River"') phs = ppl.query('technology == "Pumped Storage"') hydro = ppl.query('technology == "Reservoir"') - bus_id = ppl["bus"] + if snakemake.params.alternative_clustering: + bus_id = ppl["region_id"] + else: + bus_id = ppl["bus"] inflow_idx = ror.index.union(hydro.index) if not inflow_idx.empty: diff --git a/scripts/add_export.py b/scripts/add_export.py index ba7b2442e..d8a860b82 100644 --- a/scripts/add_export.py +++ b/scripts/add_export.py @@ -47,13 +47,13 @@ def select_ports(n): logger.error( "No export ports chosen, please add ports to the file data/export_ports.csv" ) - gadm_level = snakemake.params.gadm_level + gadm_layer_id = snakemake.params.gadm_layer_id - ports["gadm_{}".format(gadm_level)] = ports[["x", "y", "country"]].apply( + ports["gadm_{}".format(gadm_layer_id)] = ports[["x", "y", "country"]].apply( lambda port: locate_bus( port[["x", "y"]], port["country"], - gadm_level, + gadm_layer_id, snakemake.input["shapes_path"], snakemake.params.alternative_clustering, ), @@ -62,7 +62,7 @@ def select_ports(n): # TODO: revise if ports quantity and property by shape become relevant # drop duplicated entries - gcol = "gadm_{}".format(gadm_level) + gcol = "gadm_{}".format(gadm_layer_id) ports_sel = ports.loc[~ports[gcol].duplicated(keep="first")].set_index(gcol) # Select the hydrogen buses based on nodes with ports diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index 2c317c305..34b6c6a6a 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -57,7 +57,13 @@ import numpy as np import pandas as pd import pypsa -from _helpers import configure_logging, create_logger +from _helpers import ( + configure_logging, + create_logger, + lossy_bidirectional_links, + override_component_attrs, + set_length_based_efficiency, +) from add_electricity import ( 
diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py
index 75ae9ce42..84232f4d8 100755
--- a/scripts/add_electricity.py
+++ b/scripts/add_electricity.py
@@ -488,7 +488,10 @@ def attach_hydro(n, costs, ppl):
     ror = ppl.query('technology == "Run-Of-River"')
     phs = ppl.query('technology == "Pumped Storage"')
     hydro = ppl.query('technology == "Reservoir"')
-    bus_id = ppl["bus"]
+    if snakemake.params.alternative_clustering:
+        bus_id = ppl["region_id"]
+    else:
+        bus_id = ppl["bus"]
 
     inflow_idx = ror.index.union(hydro.index)
     if not inflow_idx.empty:
diff --git a/scripts/add_export.py b/scripts/add_export.py
index ba7b2442e..d8a860b82 100644
--- a/scripts/add_export.py
+++ b/scripts/add_export.py
@@ -47,13 +47,13 @@ def select_ports(n):
         logger.error(
             "No export ports chosen, please add ports to the file data/export_ports.csv"
         )
-    gadm_level = snakemake.params.gadm_level
+    gadm_layer_id = snakemake.params.gadm_layer_id
 
-    ports["gadm_{}".format(gadm_level)] = ports[["x", "y", "country"]].apply(
+    ports["gadm_{}".format(gadm_layer_id)] = ports[["x", "y", "country"]].apply(
         lambda port: locate_bus(
             port[["x", "y"]],
             port["country"],
-            gadm_level,
+            gadm_layer_id,
             snakemake.input["shapes_path"],
             snakemake.params.alternative_clustering,
         ),
@@ -62,7 +62,7 @@
 
     # TODO: revise if ports quantity and property by shape become relevant
     # drop duplicated entries
-    gcol = "gadm_{}".format(gadm_level)
+    gcol = "gadm_{}".format(gadm_layer_id)
     ports_sel = ports.loc[~ports[gcol].duplicated(keep="first")].set_index(gcol)
 
     # Select the hydrogen buses based on nodes with ports
diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py
index 2c317c305..34b6c6a6a 100644
--- a/scripts/add_extra_components.py
+++ b/scripts/add_extra_components.py
@@ -57,7 +57,13 @@
 import numpy as np
 import pandas as pd
 import pypsa
-from _helpers import configure_logging, create_logger
+from _helpers import (
+    configure_logging,
+    create_logger,
+    lossy_bidirectional_links,
+    override_component_attrs,
+    set_length_based_efficiency,
+)
 from add_electricity import (
     _add_missing_carriers_from_costs,
     add_nice_carrier_names,
@@ -225,7 +231,7 @@
     )
 
 
-def attach_hydrogen_pipelines(n, costs, config):
+def attach_hydrogen_pipelines(n, costs, config, transmission_efficiency):
     elec_opts = config["electricity"]
     ext_carriers = elec_opts["extendable_carriers"]
     as_stores = ext_carriers.get("Store", [])
@@ -261,10 +267,15 @@
         p_nom_extendable=True,
         length=h2_links.length.values,
         capital_cost=costs.at["H2 pipeline", "capital_cost"] * h2_links.length,
-        efficiency=costs.at["H2 pipeline", "efficiency"],
         carrier="H2 pipeline",
     )
 
+    # split the pipeline into two unidirectional links to properly apply transmission losses in both directions
+    lossy_bidirectional_links(n, "H2 pipeline")
+
+    # set the pipelines' efficiency and the electricity required by the pipeline for compression
+    set_length_based_efficiency(n, "H2 pipeline", " H2", transmission_efficiency)
+
 
 if __name__ == "__main__":
     if "snakemake" not in globals():
@@ -274,8 +285,10 @@ def attach_hydrogen_pipelines(n, costs, config):
 
     configure_logging(snakemake)
 
-    n = pypsa.Network(snakemake.input.network)
+    overrides = override_component_attrs(snakemake.input.overrides)
+    n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)
     Nyears = n.snapshot_weightings.objective.sum() / 8760.0
+    transmission_efficiency = snakemake.params.transmission_efficiency
 
     config = snakemake.config
     costs = load_costs(
@@ -287,7 +300,7 @@
 
     attach_storageunits(n, costs, config)
     attach_stores(n, costs, config)
-    attach_hydrogen_pipelines(n, costs, config)
+    attach_hydrogen_pipelines(n, costs, config, transmission_efficiency)
 
     add_nice_carrier_names(n, config=snakemake.config)
diff --git a/scripts/base_network.py b/scripts/base_network.py
index 65d640d44..2c4aa6afd 100644
--- a/scripts/base_network.py
+++ b/scripts/base_network.py
@@ -489,9 +489,8 @@ def base_network(
     transformers = _load_transformers_from_osm(inputs.osm_transformers, buses)
     converters = _load_converters_from_osm(inputs.osm_converters, buses)
 
-    lines_ac = lines[lines.tag_frequency.astype(float) != 0].copy()
-    lines_dc = lines[lines.tag_frequency.astype(float) == 0].copy()
-
+    lines_ac = lines[~lines.dc].copy()
+    lines_dc = lines[lines.dc].copy()
 
     lines_ac = _set_electrical_parameters_lines(lines_config, voltages_config, lines_ac)
 
     lines_dc = _set_electrical_parameters_dc_lines(
@@ -523,20 +522,6 @@
         result_type="reduce",
     )
     n.import_components_from_dataframe(lines_ac, "Line")
-    # The columns which names starts with "bus" are mixed up with the third-bus specification
-    # when executing additional_linkports()
-    lines_dc.drop(
-        labels=[
-            "bus0_lon",
-            "bus0_lat",
-            "bus1_lon",
-            "bus1_lat",
-            "bus_0_coors",
-            "bus_1_coors",
-        ],
-        axis=1,
-        inplace=True,
-    )
     n.import_components_from_dataframe(lines_dc, "Link")
 
     n.import_components_from_dataframe(transformers, "Transformer")
diff --git a/scripts/build_base_industry_totals.py b/scripts/build_base_industry_totals.py
index e1147dcb1..977f95ea8 100644
--- a/scripts/build_base_industry_totals.py
+++ b/scripts/build_base_industry_totals.py
@@ -117,9 +117,7 @@ def create_industry_base_totals(df):
     renaming_dit = transaction.set_index("Transaction")["clean_name"].to_dict()
     clean_industry_list = list(transaction.clean_name.unique())
 
-    unsd_path = (
-        os.path.dirname(snakemake.input["energy_totals_base"]) + "/demand/unsd/data/"
-    )
+    unsd_path = snakemake.input.unsd_export_path
 
     # Get the files from the path provided in the OP
     all_files = list(Path(unsd_path).glob("*.txt"))
diff --git a/scripts/build_bus_regions.py b/scripts/build_bus_regions.py
index 21b39fc96..f9f4775fb 100644
--- a/scripts/build_bus_regions.py
+++ b/scripts/build_bus_regions.py
@@ -256,6 +256,26 @@ def get_gadm_shape(
             crs=country_shapes.crs,
         ).dropna(axis="index", subset=["geometry"])
 
+    if snakemake.params.alternative_clustering:
+        # determine isolated buses
+        n.determine_network_topology()
+        non_isolated_buses = n.buses.duplicated(subset=["sub_network"], keep=False)
+        isolated_buses = n.buses[~non_isolated_buses].index
+        non_isolated_regions = onshore_regions[
+            ~onshore_regions.name.isin(isolated_buses)
+        ]
+        isolated_regions = onshore_regions[onshore_regions.name.isin(isolated_buses)]
+
+        # Combine regions while prioritizing non-isolated ones
+        onshore_regions = pd.concat(
+            [non_isolated_regions, isolated_regions]
+        ).drop_duplicates("shape_id", keep="first")
+
+        if len(onshore_regions) < len(gadm_country):
+            logger.warning(
+                "The number of remaining buses is smaller than the suggested number of administrative clusters!"
+            )
+
     onshore_regions = pd.concat([onshore_regions], ignore_index=True).to_file(
         snakemake.output.regions_onshore
     )
diff --git a/scripts/build_industrial_distribution_key.py b/scripts/build_industrial_distribution_key.py
index 48d7ed8c6..02a650150 100644
--- a/scripts/build_industrial_distribution_key.py
+++ b/scripts/build_industrial_distribution_key.py
@@ -21,7 +21,7 @@
 gpd_version = StrictVersion(gpd.__version__)
 
 
-def map_industry_to_buses(df, countries, gadm_level, shapes_path, gadm_clustering):
+def map_industry_to_buses(df, countries, gadm_layer_id, shapes_path, gadm_clustering):
     """
     Load hotmaps database of industrial sites and map onto bus regions.
     Build industrial demand... Change name and add other functions.
@@ -31,18 +31,18 @@
     Change hotmaps to more descriptive name, etc.
     """
     df = df[df.country.isin(countries)]
-    df["gadm_{}".format(gadm_level)] = df[["x", "y", "country"]].apply(
+    df["gadm_{}".format(gadm_layer_id)] = df[["x", "y", "country"]].apply(
         lambda site: locate_bus(
             site[["x", "y"]].astype("float"),
             site["country"],
-            gadm_level,
+            gadm_layer_id,
             shapes_path,
             gadm_clustering,
         ),
         axis=1,
    )
 
-    return df.set_index("gadm_" + str(gadm_level))
+    return df.set_index("gadm_" + str(gadm_layer_id))
 
 
 def build_nodal_distribution_key(
@@ -131,7 +131,7 @@ def match_technology(df):
 
     regions = gpd.read_file(snakemake.input.regions_onshore)
     shapes_path = snakemake.input.shapes_path
-    gadm_level = snakemake.params.gadm_level
+    gadm_layer_id = snakemake.params.gadm_layer_id
     countries = snakemake.params.countries
     gadm_clustering = snakemake.params.alternative_clustering
 
@@ -178,7 +178,7 @@ def match_technology(df):
     industrial_database = map_industry_to_buses(
         geo_locs[geo_locs.quality != "unavailable"],
         countries,
-        gadm_level,
+        gadm_layer_id,
         shapes_path,
         gadm_clustering,
     )
diff --git a/scripts/build_osm_network.py b/scripts/build_osm_network.py
index af1ca22a9..91a66ef5f 100644
--- a/scripts/build_osm_network.py
+++ b/scripts/build_osm_network.py
@@ -26,6 +26,23 @@
 logger = create_logger(__name__)
 
 
+# Keep only a predefined set of columns, as otherwise conflicts are possible,
+# e.g. the columns whose names start with "bus" get mixed up with
+# the third-bus specification when executing additional_linkports()
+LINES_COLUMNS = [
+    "line_id",
+    "circuits",
+    "voltage",
+    "bus0",
+    "bus1",
+    "length",
+    "dc",
+    "country",
+    "geometry",
+    "bounds",
+]
+
+
 def line_endings_to_bus_conversion(lines):
     # Assign to every line a start and end point
 
@@ -716,6 +733,7 @@ def built_network(
     countries_config,
     geo_crs,
     distance_crs,
+    lines_cols_standard,
     force_ac=False,
 ):
     logger.info("Stage 1/5: Read input data")
@@ -780,6 +798,8 @@ def built_network(
     if not os.path.exists(outputs["lines"]):
         os.makedirs(os.path.dirname(outputs["lines"]), exist_ok=True)
 
+    lines = lines[lines_cols_standard]
+
     to_csv_nafix(lines, outputs["lines"])  # Generate CSV
     to_csv_nafix(converters, outputs["converters"])  # Generate CSV
     to_csv_nafix(transformers, outputs["transformers"])  # Generate CSV
@@ -815,5 +835,6 @@
         countries,
         geo_crs,
         distance_crs,
+        lines_cols_standard=LINES_COLUMNS,
         force_ac=force_ac,
     )
diff --git a/scripts/build_powerplants.py b/scripts/build_powerplants.py
index 4bf22e524..b1719108d 100644
--- a/scripts/build_powerplants.py
+++ b/scripts/build_powerplants.py
@@ -337,13 +337,16 @@ def replace_natural_gas_technology(df: pd.DataFrame):
     else:
         config["main_query"] = ""
 
-    ppl = (
-        pm.powerplants(from_url=False, update=True, config_update=config)
-        .powerplant.fill_missing_decommissioning_years()
-        .query('Fueltype not in ["Solar", "Wind"] and Country in @countries_names')
-        .powerplant.convert_country_to_alpha2()
-        .pipe(replace_natural_gas_technology)
-    )
+    if snakemake.config["electricity"]["custom_powerplants"] != "replace":
+        ppl = (
+            pm.powerplants(from_url=False, update=True, config_update=config)
+            .powerplant.fill_missing_decommissioning_years()
+            .query('Fueltype not in ["Solar", "Wind"] and Country in @countries_names')
+            .powerplant.convert_country_to_alpha2()
+            .pipe(replace_natural_gas_technology)
+        )
+    else:
+        ppl = pd.DataFrame()
 
     ppl = add_custom_powerplants(
         ppl, snakemake.input, snakemake.config
     )
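A rough sketch of the switch introduced above: when config["electricity"]["custom_powerplants"] is set to "replace", the powerplantmatching query is skipped entirely (the helper name below is a hypothetical stand-in, not a real function in the repo):

    import pandas as pd

    custom_powerplants = "replace"  # assumed value of config["electricity"]["custom_powerplants"]
    if custom_powerplants != "replace":
        ppl = run_powerplantmatching_query()  # hypothetical stand-in for the pm.powerplants() chain
    else:
        ppl = pd.DataFrame()  # start empty; add_custom_powerplants() then supplies every plant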
diff --git a/scripts/build_renewable_profiles.py b/scripts/build_renewable_profiles.py
index 8c16bce09..77427534d 100644
--- a/scripts/build_renewable_profiles.py
+++ b/scripts/build_renewable_profiles.py
@@ -356,6 +356,9 @@ def rescale_hydro(plants, runoff, normalize_using_yearly, normalization_year):
         logger.info("No bus has installed hydro plants, ignoring normalization.")
         return runoff
 
+    if snakemake.params.alternative_clustering:
+        plants = plants.set_index("shape_id")
+
     years_statistics = normalize_using_yearly.index
     if isinstance(years_statistics, pd.DatetimeIndex):
         years_statistics = years_statistics.year
@@ -530,6 +533,24 @@
     # the region should be restricted for non-hydro technologies, as the hydro potential is calculated across hydrobasins which may span beyond the region of the country
     cutout = filter_cutout_region(cutout, regions)
 
+    if snakemake.params.alternative_clustering:
+        regions = gpd.GeoDataFrame(
+            regions.reset_index()
+            .groupby("shape_id")
+            .agg(
+                {
+                    "x": "mean",
+                    "y": "mean",
+                    "country": "first",
+                    "geometry": "first",
+                    "bus": "first",
+                }
+            )
+            .reset_index()
+            .set_index("bus"),
+            crs=regions.crs,
+        )
+
     buses = regions.index
 
     func = getattr(cutout, resource.pop("method"))
@@ -556,10 +577,17 @@
     # select busbar whose location (p) belongs to at least one hydrobasin geometry
     # if extendable option is true, all buses are included
     # otherwise only where hydro powerplants are available are considered
-    filter_bus_to_consider = regions.index.map(
-        lambda bus_id: config.get("extendable", False)
-        | (bus_id in hydro_ppls.bus.values)
-    )
+    if snakemake.params.alternative_clustering:
+        filter_bus_to_consider = regions.index.map(
+            lambda bus_id: config.get("extendable", False)
+            | (bus_id in hydro_ppls.region_id.values)
+        )
+    ### TODO: quickfix. The case above and the case below should be unified
+    if snakemake.params.alternative_clustering == False:
+        filter_bus_to_consider = regions.index.map(
+            lambda bus_id: config.get("extendable", False)
+            | (bus_id in hydro_ppls.bus.values)
+        )
     bus_to_consider = regions.index[filter_bus_to_consider]
 
     # identify subset of buses within the hydrobasins
@@ -577,10 +605,17 @@
         columns={"x": "lon", "y": "lat", "country": "countries"}
     ).loc[bus_in_hydrobasins, ["lon", "lat", "countries", "shape_id"]]
 
-    resource["plants"]["installed_hydro"] = [
-        True if (bus_id in hydro_ppls.bus.values) else False
-        for bus_id in resource["plants"].index
-    ]
+    # TODO: these cases shall be fixed by restructuring the alternative clustering procedure
+    if snakemake.params.alternative_clustering == False:
+        resource["plants"]["installed_hydro"] = [
+            True if (bus_id in hydro_ppls.bus.values) else False
+            for bus_id in resource["plants"].index
+        ]
+    else:
+        resource["plants"]["installed_hydro"] = [
+            True if (bus_id in hydro_ppls.region_id.values) else False
+            for bus_id in resource["plants"].shape_id.values
+        ]
 
     # get normalization before executing runoff
     normalization = None
@@ -596,6 +631,8 @@
     else:  # otherwise perform the calculations
         inflow = correction_factor * func(capacity_factor=True, **resource)
 
+        if snakemake.params.alternative_clustering:
+            inflow["plant"] = regions.shape_id.loc[inflow["plant"]].values
+
     if "clip_min_inflow" in config:
         inflow = inflow.where(inflow >= config["clip_min_inflow"], 0)
diff --git a/scripts/build_shapes.py b/scripts/build_shapes.py
index a09829302..a4247e64b 100644
--- a/scripts/build_shapes.py
+++ b/scripts/build_shapes.py
@@ -22,6 +22,7 @@
     BASE_DIR,
     configure_logging,
     create_logger,
+    save_to_geojson,
     three_2_two_digits_country,
     two_2_three_digits_country,
     two_digits_2_name_country,
@@ -305,23 +306,6 @@ def country_cover(country_shapes, eez_shapes=None, out_logging=False, distance=0
     return africa_shape
 
 
-def save_to_geojson(df, fn):
-    if os.path.exists(fn):
-        os.unlink(fn)  # remove file if it exists
-    if not isinstance(df, gpd.GeoDataFrame):
-        df = gpd.GeoDataFrame(dict(geometry=df))
-
-    # save file if the GeoDataFrame is non-empty
-    if df.shape[0] > 0:
-        df = df.reset_index()
-        schema = {**gpd.io.file.infer_schema(df), "geometry": "Unknown"}
-        df.to_file(fn, driver="GeoJSON", schema=schema, engine="fiona")
-    else:
-        # create empty file to avoid issues with snakemake
-        with open(fn, "w") as fp:
-            pass
-
-
 def load_EEZ(countries_codes, geo_crs, EEZ_gpkg="./data/eez/eez_v11.gpkg"):
     """
     Function to load the database of the Exclusive Economic Zones.
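The cluster_network.py and simplify_network.py changes below fill missing aggregation-strategy defaults via the new update_config_dictionary() helper from scripts/_helpers.py. A small sketch of its behaviour with toy values; note that dict_to_use overwrites per-column entries that already exist:

    from _helpers import update_config_dictionary  # assumes scripts/ is importable

    strategies = {"buses": {"lat": "median"}}
    update_config_dictionary(
        config_dict=strategies,
        parameter_key_to_fill="buses",
        dict_to_use={"v_nom": "first", "lat": "mean"},
    )
    # the helper mutates (and returns) the dict; the passed defaults win
    assert strategies == {"buses": {"v_nom": "first", "lat": "mean"}}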
diff --git a/scripts/cluster_network.py b/scripts/cluster_network.py
index 2eae2f4d8..a5111f8f6 100644
--- a/scripts/cluster_network.py
+++ b/scripts/cluster_network.py
@@ -121,19 +121,21 @@
     :align: center
 """
+import logging
 import os
 from functools import reduce
 
 import geopandas as gpd
+import linopy
 import numpy as np
 import pandas as pd
-import pyomo.environ as po
 import pypsa
 from _helpers import (
     REGION_COLS,
     configure_logging,
     create_logger,
     get_aggregation_strategies,
+    update_config_dictionary,
     update_p_nom_max,
 )
 from add_electricity import load_costs
@@ -335,50 +337,28 @@ def distribute_clusters(
         distribution_factor.sum(), 1.0, rtol=1e-3
     ), f"Country weights L must sum up to 1.0 when distributing clusters. Is {distribution_factor.sum()}."
 
-    m = po.ConcreteModel()
-
-    def n_bounds(model, *n_id):
-        """
-        Create a function that makes a bound pair for pyomo.
-
-        Use n_bounds(model, n_id) if N is Single-Index
-        Use n_bounds(model, *n_id) if N is Multi-Index
-        Example: https://pyomo.readthedocs.io/en/stable/pyomo_modeling_components/Variables.html
-
-        Returns
-        -------
-        bounds = A function (or Python object) that gives a (lower,upper) bound pair i.e.(1,10) for the variable
-        """
-        return (1, N[n_id])
-
-    m.n = po.Var(list(distribution_factor.index), bounds=n_bounds, domain=po.Integers)
-    m.tot = po.Constraint(expr=(po.summation(m.n) == n_clusters))
-    m.objective = po.Objective(
-        expr=sum(
-            (m.n[i] - distribution_factor.loc[i] * n_clusters) ** 2
-            for i in distribution_factor.index
-        ),
-        sense=po.minimize,
+    m = linopy.Model()
+    clusters = m.add_variables(
+        lower=1, upper=N, coords=[distribution_factor.index], name="n", integer=True
     )
-    opt = po.SolverFactory(solver_name)
-    if not opt.has_capability("quadratic_objective"):
-        logger.warning(
-            f"The configured solver `{solver_name}` does not support quadratic objectives. Falling back to `ipopt`."
+    m.add_constraints(clusters.sum() == n_clusters, name="tot")
+    # leave out constant in objective (L * n_clusters) ** 2 as it doesn't affect the clustering results
+    m.objective = (
+        clusters * clusters - 2 * clusters * distribution_factor * n_clusters
+    ).sum()
+    if solver_name == "gurobi":
+        logging.getLogger("gurobipy").propagate = False
+    elif solver_name not in ["scip", "cplex", "xpress", "copt", "mosek"]:
+        logger.error(
+            f"The configured solver `{solver_name}` does not support quadratic objectives. Falling back to `scip`."
         )
-        opt = po.SolverFactory("ipopt")
-
-    results = opt.solve(m)
-    assert (
-        results["Solver"][0]["Status"] == "ok"
-    ), f"Solver returned non-optimally: {results}"
+        solver_name = "scip"
+    m.solve(solver_name=solver_name)
+    return m.solution["n"].to_series().astype(int)
 
-    return (
-        pd.Series(m.n.get_values(), index=distribution_factor.index).round().astype(int)
-    )
-
 
-def busmap_for_gadm_clusters(inputs, n, gadm_level, geo_crs, country_list):
+def busmap_for_gadm_clusters(inputs, n, gadm_layer_id, geo_crs, country_list):
     gdf = gpd.read_file(inputs.gadm_shapes)
 
     def locate_bus(coords, co):
@@ -394,12 +374,12 @@ def locate_bus(coords, co):
             ]["GADM_ID"].item()
 
     buses = n.buses
-    buses["gadm_{}".format(gadm_level)] = buses[["x", "y", "country"]].apply(
+    buses["gadm_{}".format(gadm_layer_id)] = buses[["x", "y", "country"]].apply(
         lambda bus: locate_bus(bus[["x", "y"]], bus["country"]), axis=1
     )
 
     buses["gadm_subnetwork"] = (
-        buses["gadm_{}".format(gadm_level)] + "_" + buses["carrier"].astype(str)
+        buses["gadm_{}".format(gadm_layer_id)] + "_" + buses["carrier"].astype(str)
     )
     busmap = buses["gadm_subnetwork"]
 
@@ -575,9 +555,10 @@ def clustering_for_n_clusters(
     extended_link_costs=0,
     focus_weights=None,
 ):
-    bus_strategies, generator_strategies = get_aggregation_strategies(
-        aggregation_strategies
-    )
+    line_strategies = aggregation_strategies.get("lines", dict())
+    bus_strategies = aggregation_strategies.get("buses", dict())
+    generator_strategies = aggregation_strategies.get("generators", dict())
+    one_port_strategies = aggregation_strategies.get("one_ports", dict())
 
     if not isinstance(custom_busmap, pd.Series):
         if alternative_clustering:
@@ -603,12 +584,14 @@
     clustering = get_clustering_from_busmap(
         n,
         busmap,
-        bus_strategies=bus_strategies,
         aggregate_generators_weighted=True,
         aggregate_generators_carriers=aggregate_carriers,
         aggregate_one_ports=["Load", "StorageUnit"],
         line_length_factor=line_length_factor,
+        line_strategies=line_strategies,
+        bus_strategies=bus_strategies,
         generator_strategies=generator_strategies,
+        one_port_strategies=one_port_strategies,
         scale_link_capital_costs=False,
     )
 
@@ -630,14 +613,6 @@
     return clustering
 
 
-def save_to_geojson(s, fn):
-    if os.path.exists(fn):
-        os.unlink(fn)
-    df = s.reset_index()
-    schema = {**gpd.io.file.infer_schema(df), "geometry": "Unknown"}
-    df.to_file(fn, driver="GeoJSON", schema=schema, engine="fiona")
-
-
 def cluster_regions(busmaps, inputs, output):
     busmap = reduce(lambda x, y: x.map(y), busmaps[1:], busmaps[0])
 
@@ -703,9 +678,7 @@
         # Fast-path if no clustering is necessary
         busmap = n.buses.index.to_series()
         linemap = n.lines.index.to_series()
-        clustering = pypsa.clustering.spatial.Clustering(
-            n, busmap, linemap, linemap, pd.Series(dtype="O")
-        )
+        clustering = pypsa.clustering.spatial.Clustering(n, busmap, linemap)
     elif len(n.buses) < n_clusters:
         logger.error(
            f"Desired number of clusters ({n_clusters}) higher than the number of buses ({len(n.buses)})"
@@ -727,14 +700,25 @@ def consense(x):
         ).all() or x.isnull().all(), "The `potential` configuration option must agree for all renewable carriers, for now!"
         return v
 
-    aggregation_strategies = snakemake.params.cluster_options.get(
-        "aggregation_strategies", {}
+    aggregation_strategies = snakemake.params.aggregation_strategies
+
+    # Aggregation strategies must be set for all columns
+    update_config_dictionary(
+        config_dict=aggregation_strategies,
+        parameter_key_to_fill="lines",
+        dict_to_use={"v_nom": "first", "geometry": "first", "bounds": "first"},
     )
-    # translate str entries of aggregation_strategies to pd.Series functions:
-    aggregation_strategies = {
-        p: {k: getattr(pd.Series, v) for k, v in aggregation_strategies[p].items()}
-        for p in aggregation_strategies.keys()
-    }
+    update_config_dictionary(
+        config_dict=aggregation_strategies,
+        parameter_key_to_fill="buses",
+        dict_to_use={
+            "v_nom": "first",
+            "lat": "mean",
+            "lon": "mean",
+            "country": "first",
+        },
+    )
+
+    custom_busmap = False  # snakemake.params.custom_busmap; custom busmap is deprecated https://github.com/pypsa-meets-earth/pypsa-earth/pull/694
     if custom_busmap:
         busmap = pd.read_csv(
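A short check of the "leave out constant" comment in distribute_clusters() above: for a country with weight L and total cluster count N, (n - L*N)**2 = n*n - 2*n*L*N + (L*N)**2, and the last term does not depend on the decision variable n. Toy numbers, single country:

    L, N = 0.4, 10  # toy country weight and total number of clusters
    full = lambda n: (n - L * N) ** 2
    reduced = lambda n: n * n - 2 * n * L * N
    # both objectives pick the same integer cluster count (here n = 4)
    assert min(range(1, N + 1), key=full) == min(range(1, N + 1), key=reduced)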
diff --git a/scripts/prepare_energy_totals.py b/scripts/prepare_energy_totals.py
index be635483e..e9382544f 100644
--- a/scripts/prepare_energy_totals.py
+++ b/scripts/prepare_energy_totals.py
@@ -53,9 +53,7 @@ def calculate_end_values(df):
     investment_year = int(snakemake.wildcards.planning_horizons)
     demand_sc = snakemake.wildcards.demand  # loading the demand scenrario wildcard
 
-    base_energy_totals = read_csv_nafix(
-        os.path.join(BASE_DIR, "data/energy_totals_base.csv"), index_col=0
-    )
+    base_energy_totals = read_csv_nafix(snakemake.input.unsd_paths, index_col=0)
     growth_factors_cagr = read_csv_nafix(
         snakemake.input.growth_factors_cagr, index_col=0
     )
diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py
index 880dafef7..cc1dc099b 100644
--- a/scripts/prepare_sector_network.py
+++ b/scripts/prepare_sector_network.py
@@ -1022,23 +1022,23 @@ def add_aviation(n, cost):
     airports = pd.read_csv(snakemake.input.airports, keep_default_na=False)
     airports = airports[airports.country.isin(countries)]
 
-    gadm_level = options["gadm_level"]
+    gadm_layer_id = snakemake.config["build_shape_options"]["gadm_layer_id"]
 
-    airports["gadm_{}".format(gadm_level)] = airports[["x", "y", "country"]].apply(
+    airports["gadm_{}".format(gadm_layer_id)] = airports[["x", "y", "country"]].apply(
         lambda airport: locate_bus(
             airport[["x", "y"]],
             airport["country"],
-            gadm_level,
+            gadm_layer_id,
             snakemake.input.shapes_path,
             snakemake.config["cluster_options"]["alternative_clustering"],
         ),
         axis=1,
     )
     # To change 3 country code to 2
-    # airports["gadm_{}".format(gadm_level)] = airports["gadm_{}".format(gadm_level)].apply(
+    # airports["gadm_{}".format(gadm_layer_id)] = airports["gadm_{}".format(gadm_layer_id)].apply(
     #     lambda cocode: three_2_two_digits_country(cocode[:3]) + " " + cocode[4:-2])
 
-    airports = airports.set_index("gadm_{}".format(gadm_level))
+    airports = airports.set_index("gadm_{}".format(gadm_layer_id))
 
     ind = pd.DataFrame(n.buses.index[n.buses.carrier == "AC"])
 
@@ -1290,7 +1290,7 @@ def add_shipping(n, costs):
     ).squeeze()
     ports = ports[ports.country.isin(countries)]
 
-    gadm_level = options["gadm_level"]
+    gadm_layer_id = snakemake.config["build_shape_options"]["gadm_layer_id"]
 
     all_navigation = ["total international navigation", "total domestic navigation"]
 
@@ -1307,18 +1307,18 @@
         options["shipping_hydrogen_share"], demand_sc + "_" + str(investment_year)
     )
 
-    ports["gadm_{}".format(gadm_level)] = ports[["x", "y", "country"]].apply(
ports["gadm_{}".format(gadm_layer_id)] = ports[["x", "y", "country"]].apply( lambda port: locate_bus( port[["x", "y"]], port["country"], - gadm_level, + gadm_layer_id, snakemake.input["shapes_path"], snakemake.config["cluster_options"]["alternative_clustering"], ), axis=1, ) - ports = ports.set_index("gadm_{}".format(gadm_level)) + ports = ports.set_index("gadm_{}".format(gadm_layer_id)) ind = pd.DataFrame(n.buses.index[n.buses.carrier == "AC"]) ind = ind.set_index(n.buses.index[n.buses.carrier == "AC"]) diff --git a/scripts/simplify_network.py b/scripts/simplify_network.py index 92c3dd340..9e3576388 100644 --- a/scripts/simplify_network.py +++ b/scripts/simplify_network.py @@ -96,13 +96,12 @@ from _helpers import ( configure_logging, create_logger, - get_aggregation_strategies, + update_config_dictionary, update_p_nom_max, ) from add_electricity import load_costs from cluster_network import cluster_regions, clustering_for_n_clusters from pypsa.clustering.spatial import ( - aggregategenerators, aggregateoneport, busmap_by_stubs, get_clustering_from_busmap, @@ -276,11 +275,15 @@ def replace_components(n, c, df, pnl): _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus, output) - _, generator_strategies = get_aggregation_strategies(aggregation_strategies) + generator_strategies = aggregation_strategies["generators"] carriers = set(n.generators.carrier) - set(exclude_carriers) - generators, generators_pnl = aggregategenerators( - n, busmap, carriers=carriers, custom_strategies=generator_strategies + generators, generators_pnl = aggregateoneport( + n, + busmap, + "Generator", + carriers=carriers, + custom_strategies=generator_strategies, ) replace_components(n, "Generator", generators, generators_pnl) @@ -588,19 +591,22 @@ def aggregate_to_substations(n, aggregation_strategies=dict(), buses_i=None): if not dist.empty: busmap.loc[buses_i] = dist.idxmin(1) - bus_strategies, generator_strategies = get_aggregation_strategies( - aggregation_strategies - ) + line_strategies = aggregation_strategies.get("lines", dict()) + bus_strategies = aggregation_strategies.get("buses", dict()) + generator_strategies = aggregation_strategies.get("generators", dict()) + one_port_strategies = aggregation_strategies.get("one_ports", dict()) clustering = get_clustering_from_busmap( n, busmap, - bus_strategies=bus_strategies, aggregate_generators_weighted=True, aggregate_generators_carriers=None, aggregate_one_ports=["Load", "StorageUnit"], line_length_factor=1.0, + line_strategies=line_strategies, + bus_strategies=bus_strategies, generator_strategies=generator_strategies, + one_port_strategies=one_port_strategies, scale_link_capital_costs=False, ) return clustering.network, busmap @@ -848,19 +854,22 @@ def merge_into_network(n, threshold, aggregation_strategies=dict()): if (busmap.index == busmap).all(): return n, n.buses.index.to_series() - bus_strategies, generator_strategies = get_aggregation_strategies( - aggregation_strategies - ) + line_strategies = aggregation_strategies.get("lines", dict()) + bus_strategies = aggregation_strategies.get("buses", dict()) + generator_strategies = aggregation_strategies.get("generators", dict()) + one_port_strategies = aggregation_strategies.get("one_ports", dict()) clustering = get_clustering_from_busmap( n, busmap, - bus_strategies=bus_strategies, aggregate_generators_weighted=True, aggregate_generators_carriers=None, aggregate_one_ports=["Load", "StorageUnit"], line_length_factor=1.0, + line_strategies=line_strategies, + bus_strategies=bus_strategies, 
         generator_strategies=generator_strategies,
+        one_port_strategies=one_port_strategies,
         scale_link_capital_costs=False,
     )
 
@@ -934,19 +943,22 @@ def merge_isolated_nodes(n, threshold, aggregation_strategies=dict()):
     if (busmap.index == busmap).all():
         return n, n.buses.index.to_series()
 
-    bus_strategies, generator_strategies = get_aggregation_strategies(
-        aggregation_strategies
-    )
+    line_strategies = aggregation_strategies.get("lines", dict())
+    bus_strategies = aggregation_strategies.get("buses", dict())
+    generator_strategies = aggregation_strategies.get("generators", dict())
+    one_port_strategies = aggregation_strategies.get("one_ports", dict())
 
     clustering = get_clustering_from_busmap(
         n,
         busmap,
-        bus_strategies=bus_strategies,
         aggregate_generators_weighted=True,
         aggregate_generators_carriers=None,
         aggregate_one_ports=["Load", "StorageUnit"],
         line_length_factor=1.0,
+        line_strategies=line_strategies,
+        bus_strategies=bus_strategies,
         generator_strategies=generator_strategies,
+        one_port_strategies=one_port_strategies,
         scale_link_capital_costs=False,
     )
 
@@ -976,14 +988,25 @@ def merge_isolated_nodes(n, threshold, aggregation_strategies=dict()):
         "exclude_carriers", []
     )
     hvdc_as_lines = snakemake.params.electricity["hvdc_as_lines"]
-    aggregation_strategies = snakemake.params.cluster_options.get(
-        "aggregation_strategies", {}
+    aggregation_strategies = snakemake.params.aggregation_strategies
+
+    # Aggregation strategies must be set for all columns
+    update_config_dictionary(
+        config_dict=aggregation_strategies,
+        parameter_key_to_fill="lines",
+        dict_to_use={"v_nom": "first", "geometry": "first", "bounds": "first"},
     )
-    # translate str entries of aggregation_strategies to pd.Series functions:
-    aggregation_strategies = {
-        p: {k: getattr(pd.Series, v) for k, v in aggregation_strategies[p].items()}
-        for p in aggregation_strategies.keys()
-    }
+    update_config_dictionary(
+        config_dict=aggregation_strategies,
+        parameter_key_to_fill="buses",
+        dict_to_use={
+            "v_nom": "first",
+            "lat": "mean",
+            "lon": "mean",
+            "country": "first",
+        },
+    )
+
     n, trafo_map = simplify_network_to_base_voltage(n, linetype, base_voltage)
 
     Nyears = n.snapshot_weightings.objective.sum() / 8760
@@ -1088,7 +1111,7 @@ def merge_isolated_nodes(n, threshold, aggregation_strategies=dict()):
             solver_name,
             cluster_config.get("algorithm", "hac"),
             cluster_config.get("feature", None),
-            aggregation_strategies,
+            aggregation_strategies=aggregation_strategies,
         )
         busmaps.append(cluster_map)
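All four call sites above unpack the same optional mapping. A hedged sketch of the aggregation_strategies structure they expect; the "lines" and "buses" defaults are taken from the hunks, while the "generators" entry is an illustrative assumption:

    aggregation_strategies = {
        "lines": {"v_nom": "first", "geometry": "first", "bounds": "first"},
        "buses": {"v_nom": "first", "lat": "mean", "lon": "mean", "country": "first"},
        "generators": {"p_nom_max": "sum"},  # illustrative, not from the patch
        "one_ports": {},
    }
    # each group falls back to an empty dict when its key is missing
    line_strategies = aggregation_strategies.get("lines", dict())
    one_port_strategies = aggregation_strategies.get("one_ports", dict())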
diff --git a/scripts/solve_network.py b/scripts/solve_network.py
index 88bdc6738..2541ae9b2 100755
--- a/scripts/solve_network.py
+++ b/scripts/solve_network.py
@@ -52,15 +52,15 @@
 linear optimal power flow (plus investment planning)
 is provided in the `documentation of PyPSA `_.
 
-The optimization is based on the ``pyomo=False`` setting in the :func:`network.lopf` and :func:`pypsa.linopf.ilopf` function.
-Additionally, some extra constraints specified in :mod:`prepare_network` are added.
+The optimization is based on the :func:`network.optimize` function.
+Additionally, some extra constraints specified in :mod:`prepare_network` and :mod:`solve_network` are added.
 
 Solving the network in multiple iterations is motivated through the dependence of transmission line capacities
 and impedances on values of corresponding flows. As lines are expanded their electrical parameters change,
 which renders the optimisation bilinear even if the power flow equations are linearized.
 To retain the computational advantage of continuous linear programming, a sequential linear programming technique
 is used, where in between iterations the line impedances are updated.
-Details (and errors made through this heuristic) are discussed in the paper
+Details (and errors introduced through this heuristic) are discussed in the paper
 
 - Fabian Neumann and Tom Brown. `Heuristics for Transmission Expansion Planning in Low-Carbon Energy System Models `_), *16th International Conference on the European Energy Market*, 2019. `arXiv:1907.10548 `_.
 
@@ -85,24 +85,18 @@
 import numpy as np
 import pandas as pd
 import pypsa
+import xarray as xr
 from _helpers import configure_logging, create_logger, override_component_attrs
+from linopy import merge
 from pypsa.descriptors import get_switchable_as_dense as get_as_dense
-from pypsa.linopf import (
-    define_constraints,
-    define_variables,
-    get_var,
-    ilopf,
-    join_exprs,
-    linexpr,
-    network_lopf,
-)
-from pypsa.linopt import define_constraints, get_var, join_exprs, linexpr
+from pypsa.optimization.abstract import optimize_transmission_expansion_iteratively
+from pypsa.optimization.optimize import optimize
 
 logger = create_logger(__name__)
 pypsa.pf.logger.setLevel(logging.WARNING)
 
 
-def prepare_network(n, solve_opts):
+def prepare_network(n, solve_opts, config):
     if "clip_p_max_pu" in solve_opts:
         for df in (
             n.generators_t.p_max_pu,
@@ -159,6 +153,25 @@
 
 
 def add_CCL_constraints(n, config):
+    """
+    Add CCL (country & carrier limit) constraint to the network.
+
+    Add minimum and maximum levels of generator nominal capacity per carrier
+    for individual countries. Opts and path for agg_p_nom_minmax.csv must be defined
+    in config.yaml. Default file is available at data/agg_p_nom_minmax.csv.
+
+    Parameters
+    ----------
+    n : pypsa.Network
+    config : dict
+
+    Example
+    -------
+    scenario:
+        opts: [Co2L-CCL-24H]
+    electricity:
+        agg_p_nom_limits: data/agg_p_nom_minmax.csv
+    """
     agg_p_nom_limits = config["electricity"].get("agg_p_nom_limits")
 
     try:
@@ -174,32 +187,57 @@
     )
 
     gen_country = n.generators.bus.map(n.buses.country)
-    # cc means country and carrier
-    p_nom_per_cc = (
-        pd.DataFrame(
-            {
-                "p_nom": linexpr((1, get_var(n, "Generator", "p_nom"))),
-                "country": gen_country,
-                "carrier": n.generators.carrier,
-            }
+    capacity_variable = n.model["Generator-p_nom"]
+
+    lhs = []
+    ext_carriers = n.generators.query("p_nom_extendable").carrier.unique()
+    for c in ext_carriers:
+        ext_carrier = n.generators.query("p_nom_extendable and carrier == @c")
+        country_grouper = (
+            ext_carrier.bus.map(n.buses.country)
+            .rename_axis("Generator-ext")
+            .rename("country")
         )
-        .dropna(subset=["p_nom"])
-        .groupby(["country", "carrier"])
-        .p_nom.apply(join_exprs)
+        ext_carrier_per_country = capacity_variable.loc[
+            country_grouper.index
+        ].groupby_sum(country_grouper)
+        lhs.append(ext_carrier_per_country)
+    lhs = merge(lhs, dim=pd.Index(ext_carriers, name="carrier"))
+
+    min_matrix = agg_p_nom_minmax["min"].to_xarray().unstack().reindex_like(lhs)
+    max_matrix = agg_p_nom_minmax["max"].to_xarray().unstack().reindex_like(lhs)
+
+    n.model.add_constraints(
+        lhs >= min_matrix, name="agg_p_nom_min", mask=min_matrix.notnull()
+    )
+    n.model.add_constraints(
+        lhs <= max_matrix, name="agg_p_nom_max", mask=max_matrix.notnull()
     )
-
-    minimum = agg_p_nom_minmax["min"].dropna()
-    if not minimum.empty:
-        minconstraint = define_constraints(
-            n, p_nom_per_cc[minimum.index], ">=", minimum, "agg_p_nom", "min"
-        )
-
-    maximum = agg_p_nom_minmax["max"].dropna()
-    if not maximum.empty:
-        maxconstraint = define_constraints(
-            n, p_nom_per_cc[maximum.index], "<=", maximum, "agg_p_nom", "max"
-        )
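The constraint above unstacks a (country, carrier)-indexed frame into min/max matrices. A hedged sketch of the CSV layout this implies; the column names follow the code, the rows are invented:

    import io

    import pandas as pd

    csv = io.StringIO(
        "country,carrier,min,max\n"
        "NG,onwind,,2000\n"
        "NG,solar,500,\n"
    )
    agg_p_nom_minmax = pd.read_csv(csv, index_col=["country", "carrier"])
    # empty cells become NaN; notnull() masks them out of the constraints
    print(agg_p_nom_minmax["min"].notnull().tolist())  # [False, True]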
+ """ float_regex = "[0-9]*\.?[0-9]+" level = float(re.findall(float_regex, o)[0]) if o[-1] == "c": @@ -220,99 +258,149 @@ def add_EQ_constraints(n, o, scaling=1e-1): ) inflow = inflow.reindex(load.index).fillna(0.0) rhs = scaling * (level * load - inflow) + dispatch_variable = n.model["Generator-p"] lhs_gen = ( - linexpr( - (n.snapshot_weightings.generators * scaling, get_var(n, "Generator", "p").T) - ) - .T.groupby(ggrouper, axis=1) - .apply(join_exprs) + (dispatch_variable * (n.snapshot_weightings.generators * scaling)) + .groupby(ggrouper.to_xarray()) + .sum() + .sum("snapshot") ) - lhs_spill = ( - linexpr( - ( - -n.snapshot_weightings.stores * scaling, - get_var(n, "StorageUnit", "spill").T, - ) + # the current formulation implies that the available hydro power is (inflow - spillage) + # it implies efficiency_dispatch is 1 which is not quite general + # see https://github.com/pypsa-meets-earth/pypsa-earth/issues/1245 for possible improvements + if not n.storage_units_t.inflow.empty: + spillage_variable = n.model["StorageUnit-spill"] + lhs_spill = ( + (spillage_variable * (-n.snapshot_weightings.stores * scaling)) + .groupby_sum(sgrouper) + .groupby(sgrouper.to_xarray()) + .sum() + .sum("snapshot") ) - .T.groupby(sgrouper, axis=1) - .apply(join_exprs) - ) - lhs_spill = lhs_spill.reindex(lhs_gen.index).fillna("") - lhs = lhs_gen + lhs_spill - define_constraints(n, lhs, ">=", rhs, "equity", "min") + lhs = lhs_gen + lhs_spill + else: + lhs = lhs_gen + n.model.add_constraints(lhs >= rhs, name="equity_min") def add_BAU_constraints(n, config): - ext_c = n.generators.query("p_nom_extendable").carrier.unique() - mincaps = pd.Series( - config["electricity"].get("BAU_mincapacities", {key: 0 for key in ext_c}) - ) - lhs = ( - linexpr((1, get_var(n, "Generator", "p_nom"))) - .groupby(n.generators.carrier) - .apply(join_exprs) - ) - define_constraints(n, lhs, ">=", mincaps[lhs.index], "Carrier", "bau_mincaps") - - maxcaps = pd.Series( - config["electricity"].get("BAU_maxcapacities", {key: np.inf for key in ext_c}) - ) - lhs = ( - linexpr((1, get_var(n, "Generator", "p_nom"))) - .groupby(n.generators.carrier) - .apply(join_exprs) - ) - define_constraints(n, lhs, "<=", maxcaps[lhs.index], "Carrier", "bau_maxcaps") + """ + Add a per-carrier minimal overall capacity. + + BAU_mincapacities and opts must be adjusted in the config.yaml. + + Parameters + ---------- + n : pypsa.Network + config : dict + + Example + ------- + scenario: + opts: [Co2L-BAU-24h] + electricity: + BAU_mincapacities: + solar: 0 + onwind: 0 + OCGT: 100000 + offwind-ac: 0 + offwind-dc: 0 + Which sets minimum expansion across all nodes e.g. in Europe to 100GW. + OCGT bus 1 + OCGT bus 2 + ... > 100000 + """ + mincaps = pd.Series(config["electricity"]["BAU_mincapacities"]) + p_nom = n.model["Generator-p_nom"] + ext_i = n.generators.query("p_nom_extendable") + ext_carrier_i = xr.DataArray(ext_i.carrier.rename_axis("Generator-ext")) + lhs = p_nom.groupby(ext_carrier_i).sum() + rhs = mincaps[lhs.indexes["carrier"]].rename_axis("carrier") + n.model.add_constraints(lhs >= rhs, name="bau_mincaps") def add_SAFE_constraints(n, config): - peakdemand = ( - 1.0 + config["electricity"]["SAFE_reservemargin"] - ) * n.loads_t.p_set.sum(axis=1).max() - conv_techs = config["plotting"]["conv_techs"] + """ + Add a capacity reserve margin of a certain fraction above the peak demand. + Renewable generators and storage do not contribute. Ignores network. 
 def add_BAU_constraints(n, config):
-    ext_c = n.generators.query("p_nom_extendable").carrier.unique()
-    mincaps = pd.Series(
-        config["electricity"].get("BAU_mincapacities", {key: 0 for key in ext_c})
-    )
-    lhs = (
-        linexpr((1, get_var(n, "Generator", "p_nom")))
-        .groupby(n.generators.carrier)
-        .apply(join_exprs)
-    )
-    define_constraints(n, lhs, ">=", mincaps[lhs.index], "Carrier", "bau_mincaps")
-
-    maxcaps = pd.Series(
-        config["electricity"].get("BAU_maxcapacities", {key: np.inf for key in ext_c})
-    )
-    lhs = (
-        linexpr((1, get_var(n, "Generator", "p_nom")))
-        .groupby(n.generators.carrier)
-        .apply(join_exprs)
-    )
-    define_constraints(n, lhs, "<=", maxcaps[lhs.index], "Carrier", "bau_maxcaps")
+    """
+    Add a per-carrier minimal overall capacity.
+
+    BAU_mincapacities and opts must be adjusted in the config.yaml.
+
+    Parameters
+    ----------
+    n : pypsa.Network
+    config : dict
+
+    Example
+    -------
+    scenario:
+        opts: [Co2L-BAU-24h]
+    electricity:
+        BAU_mincapacities:
+            solar: 0
+            onwind: 0
+            OCGT: 100000
+            offwind-ac: 0
+            offwind-dc: 0
+    This sets a minimum total capacity across all nodes, e.g. 100 GW of OCGT in Europe:
+    OCGT bus 1 + OCGT bus 2 + ... >= 100000
+    """
+    mincaps = pd.Series(config["electricity"]["BAU_mincapacities"])
+    p_nom = n.model["Generator-p_nom"]
+    ext_i = n.generators.query("p_nom_extendable")
+    ext_carrier_i = xr.DataArray(ext_i.carrier.rename_axis("Generator-ext"))
+    lhs = p_nom.groupby(ext_carrier_i).sum()
+    rhs = mincaps[lhs.indexes["carrier"]].rename_axis("carrier")
+    n.model.add_constraints(lhs >= rhs, name="bau_mincaps")
 
 
 def add_SAFE_constraints(n, config):
-    peakdemand = (
-        1.0 + config["electricity"]["SAFE_reservemargin"]
-    ) * n.loads_t.p_set.sum(axis=1).max()
-    conv_techs = config["plotting"]["conv_techs"]
+    """
+    Add a capacity reserve margin of a certain fraction above the peak demand.
+    Renewable generators and storage do not contribute. Ignores network.
+
+    Parameters
+    ----------
+    n : pypsa.Network
+    config : dict
+
+    Example
+    -------
+    opts must be specified in config.yaml:
+
+    scenario:
+        opts: [Co2L-SAFE-24h]
+    electricity:
+        SAFE_reservemargin: 0.1
+    This sets a reserve margin of 10% above the peak demand.
+    """
+    peakdemand = n.loads_t.p_set.sum(axis=1).max()
+    margin = 1.0 + config["electricity"]["SAFE_reservemargin"]
+    reserve_margin = peakdemand * margin
+    conventional_carriers = config["electricity"]["conventional_carriers"]
+    ext_gens_i = n.generators.query(
+        "carrier in @conventional_carriers & p_nom_extendable"
+    ).index
+    capacity_variable = n.model["Generator-p_nom"]
+    p_nom = n.model["Generator-p_nom"].loc[ext_gens_i]
+    lhs = p_nom.sum()
     exist_conv_caps = n.generators.query(
-        "~p_nom_extendable & carrier in @conv_techs"
+        "~p_nom_extendable & carrier in @conventional_carriers"
     ).p_nom.sum()
-    ext_gens_i = n.generators.query("carrier in @conv_techs & p_nom_extendable").index
-    lhs = linexpr((1, get_var(n, "Generator", "p_nom")[ext_gens_i])).sum()
-    rhs = peakdemand - exist_conv_caps
-    define_constraints(n, lhs, ">=", rhs, "Safe", "mintotalcap")
+    rhs = reserve_margin - exist_conv_caps
+    n.model.add_constraints(lhs >= rhs, name="safe_mintotalcap")
 
 
-def add_operational_reserve_margin_constraint(n, config):
+def add_operational_reserve_margin_constraint(n, sns, config):
+    """
+    Build reserve margin constraints based on the formulation suggested in GenX,
+    https://energy.mit.edu/wp-content/uploads/2017/10/Enhanced-Decision-Support-for-a-Changing-Electricity-Landscape.pdf.
+    It implies that the reserve margin also accounts for optimal dispatch of
+    distributed energy resources (DERs) and demand response, which is a novel
+    feature of GenX.
+    """
     reserve_config = config["electricity"]["operational_reserve"]
     EPSILON_LOAD = reserve_config["epsilon_load"]
     EPSILON_VRES = reserve_config["epsilon_vres"]
     CONTINGENCY = reserve_config["contingency"]
 
     # Reserve Variables
-    reserve = get_var(n, "Generator", "r")
-    lhs = linexpr((1, reserve)).sum(1)
+    n.model.add_variables(
+        0, np.inf, coords=[sns, n.generators.index], name="Generator-r"
+    )
+    reserve = n.model["Generator-r"]
+    summed_reserve = reserve.sum("Generator")
+    lhs = summed_reserve  # ensure lhs is defined even without extendable VRES
 
     # Share of extendable renewable capacities
     ext_i = n.generators.query("p_nom_extendable").index
     vres_i = n.generators_t.p_max_pu.columns
     if not ext_i.empty and not vres_i.empty:
         capacity_factor = n.generators_t.p_max_pu[vres_i.intersection(ext_i)]
-        renewable_capacity_variables = get_var(n, "Generator", "p_nom")[
-            vres_i.intersection(ext_i)
-        ]
-        lhs += linexpr(
-            (-EPSILON_VRES * capacity_factor, renewable_capacity_variables)
-        ).sum(1)
+        p_nom_vres = (
+            n.model["Generator-p_nom"]
+            .loc[vres_i.intersection(ext_i)]
+            .rename({"Generator-ext": "Generator"})
+        )
+        lhs = summed_reserve + (
+            p_nom_vres * (-EPSILON_VRES * xr.DataArray(capacity_factor))
+        ).sum("Generator")
 
-    # Total demand at t
-    demand = n.loads_t.p.sum(1)
+    # Total demand per t
+    demand = get_as_dense(n, "Load", "p_set").sum(axis=1)
 
     # VRES potential of non extendable generators
     capacity_factor = n.generators_t.p_max_pu[vres_i.difference(ext_i)]
     renewable_capacity = n.generators.p_nom[vres_i.difference(ext_i)]
-    potential = (capacity_factor * renewable_capacity).sum(1)
+    potential = (capacity_factor * renewable_capacity).sum(axis=1)
 
     # Right-hand-side
     rhs = EPSILON_LOAD * demand + EPSILON_VRES * potential + CONTINGENCY
 
-    define_constraints(n, lhs, ">=", rhs, "Reserve margin")
+    n.model.add_constraints(lhs >= rhs, name="reserve_margin")
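A hedged numeric reading of the per-snapshot right-hand side assembled above; the epsilon and contingency values are invented, loosely in the spirit of the config example further down:

    eps_load, eps_vres, contingency = 0.02, 0.02, 4000.0  # assumed config values
    demand_t, potential_t = 50_000.0, 10_000.0            # toy MW figures at snapshot t
    rhs_t = eps_load * demand_t + eps_vres * potential_t + contingency
    # reserves (net of the extendable-VRES term on the lhs) must cover 5200 MW
    assert round(rhs_t, 6) == 5200.0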
 
 
 def update_capacity_constraint(n):
@@ -320,171 +408,153 @@ def update_capacity_constraint(n):
     ext_i = n.generators.query("p_nom_extendable").index
     fix_i = n.generators.query("not p_nom_extendable").index
 
-    dispatch = get_var(n, "Generator", "p")
-    reserve = get_var(n, "Generator", "r")
+    dispatch = n.model["Generator-p"]
+    reserve = n.model["Generator-r"]
 
     capacity_fixed = n.generators.p_nom[fix_i]
 
     p_max_pu = get_as_dense(n, "Generator", "p_max_pu")
 
-    lhs = linexpr((1, dispatch), (1, reserve))
+    lhs = dispatch + reserve
 
+    # TODO check whether `p_max_pu[ext_i]` is safe for an empty `ext_i` and drop the guard if so
     if not ext_i.empty:
-        capacity_variable = get_var(n, "Generator", "p_nom")
-        lhs += linexpr((-p_max_pu[ext_i], capacity_variable)).reindex(
-            columns=gen_i, fill_value=""
+        capacity_variable = n.model["Generator-p_nom"].rename(
+            {"Generator-ext": "Generator"}
         )
+        lhs = dispatch + reserve - capacity_variable * xr.DataArray(p_max_pu[ext_i])
 
     rhs = (p_max_pu[fix_i] * capacity_fixed).reindex(columns=gen_i, fill_value=0)
 
-    define_constraints(n, lhs, "<=", rhs, "Generators", "updated_capacity_constraint")
+    n.model.add_constraints(lhs <= rhs, name="gen_updated_capacity_constraint")
 
 
 def add_operational_reserve_margin(n, sns, config):
     """
-    Build reserve margin constraints based on the formulation given in
-    https://genxproject.github.io/GenX/dev/core/#Reserves.
+    Parameters
+    ----------
+    n : pypsa.Network
+    sns : pd.DatetimeIndex
+    config : dict
+
+    Example
+    -------
+    config.yaml requires to specify operational_reserve:
+    operational_reserve: # like https://genxproject.github.io/GenX/dev/core/#Reserves
+        activate: true
+        epsilon_load: 0.02 # percentage of load at each snapshot
+        epsilon_vres: 0.02 # percentage of VRES at each snapshot
+        contingency: 400000 # MW
     """
-    define_variables(n, 0, np.inf, "Generator", "r", axes=[sns, n.generators.index])
-
-    add_operational_reserve_margin_constraint(n, config)
+    add_operational_reserve_margin_constraint(n, sns, config)
 
     update_capacity_constraint(n)
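update_capacity_constraint() above couples the reserve variables to the dispatch limit, i.e. p + r <= p_max_pu * p_nom per generator and snapshot. A toy reading with made-up numbers:

```python
p_max_pu_gt = 0.8  # availability at snapshot t (illustrative)
p_nom_g = 100.0    # installed or optimised capacity in MW (illustrative)
p_gt = 70.0        # scheduled dispatch in MW
r_max = p_max_pu_gt * p_nom_g - p_gt
print(r_max)  # 10.0 -> at most 10 MW of reserve can still be offered by this unit
```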
 
 
 def add_battery_constraints(n):
-    nodes = n.buses.index[n.buses.carrier == "battery"]
-    if nodes.empty or ("Link", "p_nom") not in n.variables.index:
+    """
+    Add constraint ensuring that charger = discharger, i.e.
+    1 * charger_size - efficiency * discharger_size = 0
+    """
+    if not n.links.p_nom_extendable.any():
         return
-    link_p_nom = get_var(n, "Link", "p_nom")
-    lhs = linexpr(
-        (1, link_p_nom[nodes + " charger"]),
-        (
-            -n.links.loc[nodes + " discharger", "efficiency"].values,
-            link_p_nom[nodes + " discharger"].values,
-        ),
+
+    discharger_bool = n.links.index.str.contains("battery discharger")
+    charger_bool = n.links.index.str.contains("battery charger")
+
+    dischargers_ext = n.links[discharger_bool].query("p_nom_extendable").index
+    chargers_ext = n.links[charger_bool].query("p_nom_extendable").index
+
+    eff = n.links.efficiency[dischargers_ext].values
+    lhs = (
+        n.model["Link-p_nom"].loc[chargers_ext]
+        - n.model["Link-p_nom"].loc[dischargers_ext] * eff
     )
-    define_constraints(n, lhs, "=", 0, "Link", "charger_ratio")
+    n.model.add_constraints(lhs == 0, name="Link-charger_ratio")
 
 
-def add_RES_constraints(n, res_share):
-    lgrouper = n.loads.bus.map(n.buses.country)
-    ggrouper = n.generators.bus.map(n.buses.country)
-    sgrouper = n.storage_units.bus.map(n.buses.country)
-    cgrouper = n.links.bus0.map(n.buses.country)
+def add_RES_constraints(n, res_share, config):
+    """
+    The constraint ensures that a predefined share of power is generated
+    by renewable sources.
+
+    Parameters
+    ----------
+    n : pypsa.Network
+    res_share : float
+    config : dict
+    """
     logger.warning(
-        "The add_RES_constraints functionality is still work in progress. "
+        "add_RES_constraints() is still a work in progress. "
         "Unexpected results might be incurred, particularly if "
         "temporal clustering is applied or if an unexpected change of technologies "
-        "is subject to the obtimisation."
+        "is subject to the optimisation."
     )
+
+    renew_techs = config["electricity"]["renewable_carriers"]
+
+    charger = ["H2 electrolysis", "battery charger"]
+    discharger = ["H2 fuel cell", "battery discharger"]
+
+    ren_gen = n.generators.query("carrier in @renew_techs")
+    ren_stores = n.storage_units.query("carrier in @renew_techs")
+    ren_charger = n.links.query("carrier in @charger")
+    ren_discharger = n.links.query("carrier in @discharger")
+
+    gens_i = ren_gen.index
+    stores_i = ren_stores.index
+    charger_i = ren_charger.index
+    discharger_i = ren_discharger.index
+
+    stores_t_weights = n.snapshot_weightings.stores
+
+    lgrouper = n.loads.bus.map(n.buses.country)
+    ggrouper = ren_gen.bus.map(n.buses.country)
+    sgrouper = ren_stores.bus.map(n.buses.country)
+    cgrouper = ren_charger.bus0.map(n.buses.country)
+    dgrouper = ren_discharger.bus0.map(n.buses.country)
+
     load = (
         n.snapshot_weightings.generators
         @ n.loads_t.p_set.groupby(lgrouper, axis=1).sum()
     )
-
     rhs = res_share * load
 
-    res_techs = [
-        "solar",
-        "onwind",
-        "offwind-dc",
-        "offwind-ac",
-        "battery",
-        "hydro",
-        "ror",
-    ]
-    charger = ["H2 electrolysis", "battery charger"]
-    discharger = ["H2 fuel cell", "battery discharger"]
-
-    gens_i = n.generators.query("carrier in @res_techs").index
-    stores_i = n.storage_units.query("carrier in @res_techs").index
-    charger_i = n.links.query("carrier in @charger").index
-    discharger_i = n.links.query("carrier in @discharger").index
-
     # Generators
     lhs_gen = (
-        linexpr(
-            (n.snapshot_weightings.generators, get_var(n, "Generator", "p")[gens_i].T)
-        )
-        .T.groupby(ggrouper, axis=1)
-        .apply(join_exprs)
+        (n.model["Generator-p"].loc[:, gens_i] * n.snapshot_weightings.generators)
+        .groupby(ggrouper.to_xarray())
+        .sum()
     )
 
     # StorageUnits
-    lhs_dispatch = (
-        (
-            linexpr(
-                (
-                    n.snapshot_weightings.stores,
-                    get_var(n, "StorageUnit", "p_dispatch")[stores_i].T,
-                )
-            )
-            .T.groupby(sgrouper, axis=1)
-            .apply(join_exprs)
-        )
-        .reindex(lhs_gen.index)
-        .fillna("")
+    store_disp_expr = (
+        n.model["StorageUnit-p_dispatch"].loc[:, stores_i] * stores_t_weights
     )
-
-    lhs_store = (
-        (
-            linexpr(
-                (
-                    -n.snapshot_weightings.stores,
-                    get_var(n, "StorageUnit", "p_store")[stores_i].T,
-                )
-            )
-            .T.groupby(sgrouper, axis=1)
-            .apply(join_exprs)
-        )
-        .reindex(lhs_gen.index)
-        .fillna("")
+    store_expr = n.model["StorageUnit-p_store"].loc[:, stores_i] * stores_t_weights
+    charge_expr = n.model["Link-p"].loc[:, charger_i] * stores_t_weights.apply(
+        lambda r: r * n.links.loc[charger_i].efficiency
     )
+    discharge_expr = n.model["Link-p"].loc[:, discharger_i] * stores_t_weights.apply(
+        lambda r: r * n.links.loc[discharger_i].efficiency
+    )
+
+    lhs_dispatch = store_disp_expr.groupby(sgrouper).sum()
+    lhs_store = store_expr.groupby(sgrouper).sum()
 
     # Stores (or their resp. Link components)
     # Note that the variables "p0" and "p1" currently do not exist.
     # Thus, p0 and p1 must be derived from "p" (which exists), taking into account the link efficiency.
-    lhs_charge = (
-        (
-            linexpr(
-                (
-                    -n.snapshot_weightings.stores,
-                    get_var(n, "Link", "p")[charger_i].T,
-                )
-            )
-            .T.groupby(cgrouper, axis=1)
-            .apply(join_exprs)
-        )
-        .reindex(lhs_gen.index)
-        .fillna("")
-    )
+    lhs_charge = charge_expr.groupby(cgrouper).sum()
 
-    lhs_discharge = (
-        (
-            linexpr(
-                (
-                    n.snapshot_weightings.stores.apply(
-                        lambda r: r * n.links.loc[discharger_i].efficiency
-                    ),
-                    get_var(n, "Link", "p")[discharger_i],
-                )
-            )
-            .groupby(cgrouper, axis=1)
-            .apply(join_exprs)
-        )
-        .reindex(lhs_gen.index)
-        .fillna("")
-    )
+    lhs_discharge = discharge_expr.groupby(cgrouper).sum()
 
-    # signs of resp. terms are coded in the linexpr.
-    # todo: for links (lhs_charge and lhs_discharge), account for snapshot weightings
-    lhs = lhs_gen + lhs_dispatch + lhs_store + lhs_charge + lhs_discharge
+    lhs = lhs_gen + lhs_dispatch - lhs_store - lhs_charge + lhs_discharge
 
-    define_constraints(n, lhs, "=", rhs, "RES share")
+    n.model.add_constraints(lhs == rhs, name="res_share")
 
 
 def add_land_use_constraint(n):
@@ -548,16 +618,21 @@ def _add_land_use_constraint_m(n):
 
 
 def add_h2_network_cap(n, cap):
     h2_network = n.links.loc[n.links.carrier == "H2 pipeline"]
-    if h2_network.index.empty or ("Link", "p_nom") not in n.variables.index:
+    if h2_network.index.empty:
         return
-    h2_network_cap = get_var(n, "Link", "p_nom")
-    subset_index = h2_network.index.intersection(h2_network_cap.index)
-    lhs = linexpr(
-        (h2_network.loc[subset_index, "length"], h2_network_cap[subset_index])
+    h2_network_cap = n.model["Link-p_nom"]
+    h2_network_cap_index = h2_network_cap.indexes["Link-ext"]
+    subset_index = h2_network.index.intersection(h2_network_cap_index)
+    diff_index = h2_network.index.difference(subset_index)
+    if len(diff_index) > 0:
+        logger.warning(
+            f"Impossible to set a limit for H2 pipelines extension for the following links: {diff_index}"
+        )
+    lhs = (
+        h2_network_cap.loc[subset_index] * h2_network.loc[subset_index, "length"]
     ).sum()
-    # lhs = linexpr((1, h2_network_cap[h2_network.index])).sum()
     rhs = cap * 1000
-    define_constraints(n, lhs, "<=", rhs, "h2_network_cap")
+    n.model.add_constraints(lhs <= rhs, name="h2_network_cap")
 
 
 def H2_export_yearly_constraint(n):
@@ -578,9 +653,10 @@ def H2_export_yearly_constraint(n):
         index=n.snapshots,
         columns=res_index,
     )
-    res = join_exprs(
-        linexpr((weightings, get_var(n, "Generator", "p")[res_index]))
-    )  # single line sum
+    dispatch_variable = n.model["Generator-p"]
+
+    # single line sum
+    res = (weightings * dispatch_variable.loc[:, res_index]).sum()
 
     load_ind = n.loads[n.loads.carrier == "AC"].index.intersection(
         n.loads_t.p_set.columns
@@ -608,7 +684,7 @@ def H2_export_yearly_constraint(n):
     else:
         rhs = h2_export * (1 / 0.7)
 
-    con = define_constraints(n, lhs, ">=", rhs, "H2ExportConstraint", "RESproduction")
+    n.model.add_constraints(lhs >= rhs, name="H2ExportConstraint-RESproduction")
 
 
 def monthly_constraints(n, n_ref):
@@ -631,15 +707,17 @@ def monthly_constraints(n, n_ref):
         index=n.snapshots,
         columns=res_index,
     )
+    dispatch_variable = n.model["Generator-p"]
 
-    res = linexpr((weightings, get_var(n, "Generator", "p")[res_index])).sum(
-        axis=1
-    )  # single line sum
+    # single line sum
+    res = (weightings * dispatch_variable.loc[:, res_index]).sum(axis=1)
     res = res.groupby(res.index.month).sum()
 
-    electrolysis = get_var(n, "Link", "p")[
-        n.links.index[n.links.index.str.contains("H2 Electrolysis")]
-    ]
+    link_p = n.model["Link-p"]
+    electrolysis = link_p.loc[
+        :, n.links.index[n.links.index.str.contains("H2 Electrolysis")]
+    ]
+
     weightings_electrolysis = pd.DataFrame(
         np.outer(
             n.snapshot_weightings["generators"], [1.0] * len(electrolysis.columns)
         ),
         index=n.snapshots,
         columns=electrolysis.columns,
     )
 
-    elec_input = linexpr((-allowed_excess * weightings_electrolysis, electrolysis)).sum(
+    elec_input = ((-allowed_excess * weightings_electrolysis) * electrolysis).sum(
         axis=1
     )
@@ -671,16 +749,16 @@ def monthly_constraints(n, n_ref):
         for i in range(len(res.index)):
-            lhs = res.iloc[i] + "\n" + elec_input.iloc[i]
+            lhs = res.iloc[i] + elec_input.iloc[i]
             rhs = res_ref.iloc[i] + elec_input_ref.iloc[i]
-            con = define_constraints(
-                n, lhs, ">=", rhs, f"RESconstraints_{i}", f"REStarget_{i}"
+            n.model.add_constraints(
+                lhs >= rhs, name=f"RESconstraints_{i}-REStarget_{i}"
             )
     else:
         for i in range(len(res.index)):
-            lhs = res.iloc[i] + "\n" + elec_input.iloc[i]
+            lhs = res.iloc[i] + elec_input.iloc[i]
 
-            con = define_constraints(
-                n, lhs, ">=", 0.0, f"RESconstraints_{i}", f"REStarget_{i}"
+            n.model.add_constraints(
+                lhs >= 0.0, name=f"RESconstraints_{i}-REStarget_{i}"
            )
    # else:
    #     logger.info("ignoring H2 export constraint as wildcard is set to 0")
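monthly_constraints() enforces, per calendar month, that matched RES generation covers the electrolysis draw scaled by allowed_excess (res + elec_input >= 0 with elec_input = -allowed_excess * electrolysis). With toy monthly totals:

```python
allowed_excess = 1.2        # illustrative config value
res_month = 900.0           # MWh of weighted RES generation in one month
electrolysis_month = 700.0  # MWh drawn by electrolysers in the same month
print(res_month - allowed_excess * electrolysis_month >= 0)  # True: 900 >= 840
```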
@@ -701,84 +779,72 @@ def add_chp_constraints(n):
     electric = n.links.index[electric_bool]
     heat = n.links.index[heat_bool]
 
-    electric_ext = n.links.index[electric_bool & n.links.p_nom_extendable]
-    heat_ext = n.links.index[heat_bool & n.links.p_nom_extendable]
+    electric_ext = n.links[electric_bool].query("p_nom_extendable").index
+    heat_ext = n.links[heat_bool].query("p_nom_extendable").index
 
-    electric_fix = n.links.index[electric_bool & ~n.links.p_nom_extendable]
-    heat_fix = n.links.index[heat_bool & ~n.links.p_nom_extendable]
+    electric_fix = n.links[electric_bool].query("~p_nom_extendable").index
+    heat_fix = n.links[heat_bool].query("~p_nom_extendable").index
 
-    link_p = get_var(n, "Link", "p")
+    p = n.model["Link-p"]  # dimension: [time, link]
 
+    # output ratio between heat and electricity and top_iso_fuel_line for extendable
     if not electric_ext.empty:
-        link_p_nom = get_var(n, "Link", "p_nom")
-
-        # ratio of output heat to electricity set by p_nom_ratio
-        lhs = linexpr(
-            (
-                n.links.loc[electric_ext, "efficiency"]
-                * n.links.loc[electric_ext, "p_nom_ratio"],
-                link_p_nom[electric_ext],
-            ),
-            (-n.links.loc[heat_ext, "efficiency"].values, link_p_nom[heat_ext].values),
-        )
+        p_nom = n.model["Link-p_nom"]
 
-        define_constraints(n, lhs, "=", 0, "chplink", "fix_p_nom_ratio")
-
-        # top_iso_fuel_line for extendable
-        lhs = linexpr(
-            (1, link_p[heat_ext]),
-            (1, link_p[electric_ext].values),
-            (-1, link_p_nom[electric_ext].values),
+        lhs = (
+            p_nom.loc[electric_ext]
+            * (n.links.p_nom_ratio * n.links.efficiency)[electric_ext].values
+            - p_nom.loc[heat_ext] * n.links.efficiency[heat_ext].values
        )
+        n.model.add_constraints(lhs == 0, name="chplink-fix_p_nom_ratio")
 
-        define_constraints(n, lhs, "<=", 0, "chplink", "top_iso_fuel_line_ext")
+        rename = {"Link-ext": "Link"}
+        lhs = (
+            p.loc[:, electric_ext]
+            + p.loc[:, heat_ext]
+            - p_nom.rename(rename).loc[electric_ext]
+        )
+        n.model.add_constraints(lhs <= 0, name="chplink-top_iso_fuel_line_ext")
 
+    # top_iso_fuel_line for fixed
     if not electric_fix.empty:
-        # top_iso_fuel_line for fixed
-        lhs = linexpr((1, link_p[heat_fix]), (1, link_p[electric_fix].values))
-
-        rhs = n.links.loc[electric_fix, "p_nom"].values
-
-        define_constraints(n, lhs, "<=", rhs, "chplink", "top_iso_fuel_line_fix")
+        lhs = p.loc[:, electric_fix] + p.loc[:, heat_fix]
+        rhs = n.links.p_nom[electric_fix]
+        n.model.add_constraints(lhs <= rhs, name="chplink-top_iso_fuel_line_fix")
 
+    # back-pressure
     if not electric.empty:
-        # backpressure
-        lhs = linexpr(
-            (
-                n.links.loc[electric, "c_b"].values * n.links.loc[heat, "efficiency"],
-                link_p[heat],
-            ),
-            (-n.links.loc[electric, "efficiency"].values, link_p[electric].values),
+        lhs = (
+            p.loc[:, heat] * (n.links.efficiency[heat] * n.links.c_b[electric].values)
+            - p.loc[:, electric] * n.links.efficiency[electric]
         )
-
-        define_constraints(n, lhs, "<=", 0, "chplink", "backpressure")
+        n.model.add_constraints(lhs <= 0, name="chplink-backpressure")
 
 
 def add_co2_sequestration_limit(n, sns):
     co2_stores = n.stores.loc[n.stores.carrier == "co2 stored"].index
 
-    if co2_stores.empty or ("Store", "e") not in n.variables.index:
+    if co2_stores.empty:
         return
 
-    vars_final_co2_stored = get_var(n, "Store", "e").loc[sns[-1], co2_stores]
+    vars_final_co2_stored = n.model["Store-e"].loc[sns[-1], co2_stores]
 
-    lhs = linexpr((1, vars_final_co2_stored)).sum()
+    lhs = (1 * vars_final_co2_stored).sum()
+
     rhs = (
         n.config["sector"].get("co2_sequestration_potential", 5) * 1e6
     )  # TODO change 200 limit (Europe)
 
     name = "co2_sequestration_limit"
-    define_constraints(
-        n, lhs, "<=", rhs, "GlobalConstraint", "mu", axes=pd.Index([name]), spec=name
-    )
+
+    n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}")
 
 
 def set_h2_colors(n):
-    blue_h2 = get_var(n, "Link", "p")[
-        n.links.index[n.links.index.str.contains("blue H2")]
-    ]
+    blue_h2 = n.model["Link-p"].loc[
+        :, n.links.index[n.links.index.str.contains("blue H2")]
+    ]
 
-    pink_h2 = get_var(n, "Link", "p")[
-        n.links.index[n.links.index.str.contains("pink H2")]
-    ]
+    pink_h2 = n.model["Link-p"].loc[
+        :, n.links.index[n.links.index.str.contains("pink H2")]
+    ]
@@ -810,16 +876,16 @@ def set_h2_colors(n):
         columns=pink_h2.columns,
     )
 
-    total_blue = linexpr((weightings_blue, blue_h2)).sum().sum()
+    total_blue = (weightings_blue * blue_h2).sum().sum()
 
-    total_pink = linexpr((weightings_pink, pink_h2)).sum().sum()
+    total_pink = (weightings_pink * pink_h2).sum().sum()
 
     rhs_blue = load_h2 * snakemake.config["sector"]["hydrogen"]["blue_share"]
     rhs_pink = load_h2 * snakemake.config["sector"]["hydrogen"]["pink_share"]
 
-    define_constraints(n, total_blue, "=", rhs_blue, "blue_h2_share")
+    n.model.add_constraints(total_blue == rhs_blue, name="blue_h2_share")
 
-    define_constraints(n, total_pink, "=", rhs_pink, "pink_h2_share")
+    n.model.add_constraints(total_pink == rhs_pink, name="pink_h2_share")
 
 
 def add_existing(n):
@@ -854,6 +920,59 @@ def add_existing(n):
             n.generators.loc[tech_index, tech] = existing_res
 
 
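The constraint added next depends on a naming convention for reversed links; the renaming rule implemented by get_backward_i() below behaves as follows (link names are hypothetical):

```python
import re

def backward_name(s: str) -> str:
    # a trailing myopic horizon "-YYYY" stays last; "-reversed" is slotted in before it
    if re.search(r"-\d{4}$", s):
        return re.sub(r"-(\d{4})$", r"-reversed-\1", s)
    return s + "-reversed"

print(backward_name("H2 pipeline A-B-2030"))  # H2 pipeline A-B-reversed-2030
print(backward_name("H2 pipeline A-B"))       # H2 pipeline A-B-reversed
```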
+def add_lossy_bidirectional_link_constraints(n: pypsa.components.Network) -> None:
+    """
+    Ensure that the two links simulating a bidirectional link are extended by
+    the same amount.
+    """
+
+    if not n.links.p_nom_extendable.any() or "reversed" not in n.links.columns:
+        return
+
+    # ensure that the 'reversed' column is boolean and identify all link carriers that have 'reversed' links
+    n.links["reversed"] = n.links.reversed.fillna(0).astype(bool)
+    carriers = n.links.loc[n.links.reversed, "carrier"].unique()  # noqa: F841
+
+    # get the indices of all forward links (non-reversed) that have a reversed counterpart
+    forward_i = n.links.query(
+        "carrier in @carriers and ~reversed and p_nom_extendable"
+    ).index
+
+    # function to get the backward (reversed) indices corresponding to forward links;
+    # it is required to properly interact with the myopic naming scheme
+    def get_backward_i(forward_i):
+        return pd.Index(
+            [
+                (
+                    re.sub(r"-(\d{4})$", r"-reversed-\1", s)
+                    if re.search(r"-\d{4}$", s)
+                    else s + "-reversed"
+                )
+                for s in forward_i
+            ]
+        )
+
+    # get the indices of all backward links (reversed)
+    backward_i = get_backward_i(forward_i)
+
+    # get the p_nom optimization variables for the links
+    links_p_nom = n.model["Link-p_nom"]
+
+    # only consider forward and backward links that are present in the optimization variables
+    subset_forward = forward_i.intersection(links_p_nom.indexes["Link-ext"])
+    subset_backward = backward_i.intersection(links_p_nom.indexes["Link-ext"])
+
+    # ensure we have a matching number of forward and backward links
+    if len(subset_forward) != len(subset_backward):
+        raise ValueError("Mismatch between forward and backward links.")
+
+    # define the left-hand side of the constraint p_nom (backward) - p_nom (forward) = 0;
+    # this ensures that a forward link is always extended by the same amount as its backward counterpart
+    lhs = links_p_nom.loc[backward_i] - links_p_nom.loc[forward_i]
+
+    # add the constraint to the PyPSA model
+    n.model.add_constraints(lhs == 0, name="Link-bidirectional_sync")
+
+
 def extra_functionality(n, snapshots):
     """
     Collects supplementary constraints which will be passed to
@@ -876,11 +995,17 @@ def extra_functionality(n, snapshots):
     for o in opts:
         if "RES" in o:
             res_share = float(re.findall("[0-9]*\.?[0-9]+$", o)[0])
-            add_RES_constraints(n, res_share)
+            add_RES_constraints(n, res_share, config)
     for o in opts:
         if "EQ" in o:
             add_EQ_constraints(n, o)
+
     add_battery_constraints(n)
+    add_lossy_bidirectional_link_constraints(n)
+
+    if snakemake.config["sector"]["chp"]:
+        logger.info("setting CHP constraints")
+        add_chp_constraints(n)
 
     if (
         snakemake.config["policy_config"]["hydrogen"]["temporal_matching"]
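For reference, the regex in the RES branch above extracts the trailing share from an opts wildcard; for a hypothetical scenario string "Co2L-RES0.3-4H" it yields 0.3 (shown here with a raw string, equivalent to the pattern above):

```python
import re

opts = "Co2L-RES0.3-4H".split("-")  # hypothetical wildcard value
for o in opts:
    if "RES" in o:
        res_share = float(re.findall(r"[0-9]*\.?[0-9]+$", o)[0])
        print(res_share)  # 0.3
```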
@@ -927,40 +1052,45 @@
         add_co2_sequestration_limit(n, snapshots)
 
 
-def solve_network(n, config, solving={}, opts="", **kwargs):
+def solve_network(n, config, solving, **kwargs):
     set_of_options = solving["solver"]["options"]
     cf_solving = solving["options"]
-    solver_options = solving["solver_options"][set_of_options] if set_of_options else {}
-    solver_name = solving["solver"]["name"]
+    kwargs["solver_options"] = (
+        solving["solver_options"][set_of_options] if set_of_options else {}
+    )
+    kwargs["solver_name"] = solving["solver"]["name"]
+    kwargs["extra_functionality"] = extra_functionality
 
-    track_iterations = cf_solving.get("track_iterations", False)
-    min_iterations = cf_solving.get("min_iterations", 4)
-    max_iterations = cf_solving.get("max_iterations", 6)
+    skip_iterations = cf_solving.get("skip_iterations", False)
+    if not n.lines.s_nom_extendable.any():
+        skip_iterations = True
+        logger.info("No expandable lines found. Skipping iterative solving.")
 
     # add to network for extra_functionality
     n.config = config
     n.opts = opts
 
-    if cf_solving.get("skip_iterations", False):
-        network_lopf(
-            n,
-            solver_name=solver_name,
-            solver_options=solver_options,
-            extra_functionality=extra_functionality,
-            **kwargs,
-        )
+    if skip_iterations:
+        status, condition = n.optimize(**kwargs)
     else:
-        ilopf(
-            n,
-            solver_name=solver_name,
-            solver_options=solver_options,
-            track_iterations=track_iterations,
-            min_iterations=min_iterations,
-            max_iterations=max_iterations,
-            extra_functionality=extra_functionality,
-            **kwargs,
+        kwargs["track_iterations"] = cf_solving.get("track_iterations", False)
+        kwargs["min_iterations"] = cf_solving.get("min_iterations", 4)
+        kwargs["max_iterations"] = cf_solving.get("max_iterations", 6)
+        status, condition = n.optimize.optimize_transmission_expansion_iteratively(
+            **kwargs
        )
+
+    if status != "ok":  # and not rolling_horizon:
+        logger.warning(
+            f"Solving status '{status}' with termination condition '{condition}'"
+        )
+    if "infeasible" in condition:
+        labels = n.model.compute_infeasibilities()
+        logger.info(f"Labels:\n{labels}")
+        n.model.print_infeasibilities()
+        raise RuntimeError("Solving status 'infeasible'")
+
     return n
 
 
@@ -969,28 +1099,28 @@ def solve_network(n, config, solving={}, opts="", **kwargs):
         from _helpers import mock_snakemake
 
         snakemake = mock_snakemake(
-            "solve_network",
+            "solve_sector_network",
             simpl="",
             clusters="4",
             ll="c1",
             opts="Co2L-4H",
+            planning_horizons="2030",
+            discountrate="0.071",
+            demand="AB",
+            sopts="144H",
+            h2export="120",
+            configfile="config.tutorial.yaml",
         )
 
     configure_logging(snakemake)
 
-    tmpdir = snakemake.params.solving.get("tmpdir")
-    if tmpdir is not None:
-        Path(tmpdir).mkdir(parents=True, exist_ok=True)
     opts = snakemake.wildcards.opts.split("-")
-    solving = snakemake.params.solving
+    solve_opts = snakemake.config["solving"]["options"]
 
     is_sector_coupled = "sopts" in snakemake.wildcards.keys()
 
-    if is_sector_coupled:
-        overrides = override_component_attrs(snakemake.input.overrides)
-        n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)
-    else:
-        n = pypsa.Network(snakemake.input.network)
+    overrides = override_component_attrs(snakemake.input.overrides)
+    n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)
 
     if snakemake.params.augmented_line_connection.get("add_to_snakefile"):
         n.lines.loc[n.lines.index.str.contains("new"), "s_nom_min"] = (
@@ -1016,15 +1146,13 @@ def solve_network(n, config, solving={}, opts="", **kwargs):
     else:
         n_ref = None
 
-    n = prepare_network(n, solving["options"])
+    n = prepare_network(n, solve_opts, config=solve_opts)
 
     n = solve_network(
         n,
         config=snakemake.config,
-        solving=solving,
-        opts=opts,
-        solver_dir=tmpdir,
-        solver_logfile=snakemake.log.solver,
+        solving=snakemake.params.solving,
+        log_fn=snakemake.log.solver,
     )
     n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
     n.export_to_netcdf(snakemake.output[0])
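After the refactor, everything solver-related reaches PyPSA through kwargs on n.optimize; a condensed sketch of the resulting call, assuming a prepared pypsa.Network `n` and the extra_functionality defined above (placeholder values normally come from config["solving"]):

```python
kwargs = {
    "solver_name": "glpk",                       # solving["solver"]["name"]
    "solver_options": {},                        # preset selected via set_of_options
    "extra_functionality": extra_functionality,  # attaches all constraints above
    "log_fn": "solver.log",                      # forwarded from snakemake.log.solver
}
status, condition = n.optimize(**kwargs)
```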
diff --git a/test/config.custom.yaml b/test/config.custom.yaml
index a596a932d..5cd36f44f 100644
--- a/test/config.custom.yaml
+++ b/test/config.custom.yaml
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: CC0-1.0
 
 ### CHANGES TO CONFIG.TUTORIAL.YAML ###
-version: 0.5.0
+version: 0.6.0
 
 run:
   name: "custom"
diff --git a/test/config.landlock.yaml b/test/config.landlock.yaml
index 913211f29..fc267e829 100644
--- a/test/config.landlock.yaml
+++ b/test/config.landlock.yaml
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: CC0-1.0
 
 ### CHANGES TO CONFIG.TUTORIAL.YAML ###
-version: 0.5.0
+version: 0.6.0
 
 countries: ["BW"]
diff --git a/test/config.monte_carlo.yaml b/test/config.monte_carlo.yaml
index 034dd51cd..c35dde51f 100644
--- a/test/config.monte_carlo.yaml
+++ b/test/config.monte_carlo.yaml
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: CC0-1.0
 
 ### CHANGES TO CONFIG.TUTORIAL.YAML ###
-version: 0.5.0
+version: 0.6.0
 
 monte_carlo:
   options:
diff --git a/test/config.sector.yaml b/test/config.sector.yaml
index abc250e0c..71daf0c62 100644
--- a/test/config.sector.yaml
+++ b/test/config.sector.yaml
@@ -2,7 +2,7 @@
 #
 # SPDX-License-Identifier: AGPL-3.0-or-later
 
-version: 0.5.0
+version: 0.6.0
 tutorial: true
 
 run:
@@ -20,8 +20,8 @@ countries: ["NG", "BJ"]
 
 electricity:
   extendable_carriers:
-    Store: []
-    Link: []
+    Store: [H2]
+    Link: [H2 pipeline]
 
   co2limit: 7.75e7
 
@@ -43,7 +43,6 @@ existing_capacities:
 
 sector:
   solid_biomass_potential: 10 # TWh/a, Potential of whole modelled area
-  gadm_level: 2
 
 snapshots:
   # arguments to pd.date_range
   start: "2013-03-1"
diff --git a/test/config.test_myopic.yaml b/test/config.test_myopic.yaml
index 05f3c71a1..abe000aba 100644
--- a/test/config.test_myopic.yaml
+++ b/test/config.test_myopic.yaml
@@ -2,13 +2,12 @@
 #
 # SPDX-License-Identifier: AGPL-3.0-or-later
 
-version: 0.5.0
+version: 0.6.0
 
 logging_level: INFO
 tutorial: true
 
 results_dir: results/
 summary_dir: results/
-costs_dir: data/ #TODO change to the equivalent of technology data
 
 run:
   name: "test_myopic" # use this to keep track of runs with different settings
@@ -99,7 +98,7 @@ custom_data:
 
 costs: # Costs used in PyPSA-Earth-Sec. Year depends on the wildcard planning_horizon in the scenario section
-  version: v0.6.2
+  version: v0.10.0
   lifetime: 25 #default lifetime
   # From a Lion Hirth paper, also reflects average of Noothout et al 2016
   discountrate: [0.071] #, 0.086, 0.111]
@@ -288,7 +287,6 @@ sector:
   co2_network: true
   co2_sequestration_potential: 200 #MtCO2/a sequestration potential for Europe
   co2_sequestration_cost: 10 #EUR/tCO2 for sequestration of CO2
-  hydrogen_underground_storage: true
   shipping_hydrogen_liquefaction: false
 
   shipping_average_efficiency: 0.4 #For conversion of fuel oil to propulsion in 2011
@@ -303,7 +301,6 @@ sector:
     NZ_2050: 0.36
     DF_2050: 0.12
 
-  gadm_level: 1
   h2_cavern: true
   marginal_cost_storage: 0
   methanation: true