Cluster heat nodes #257

Merged · 8 commits · Jan 30, 2023
1 change: 1 addition & 0 deletions config.default.yaml
@@ -160,6 +160,7 @@ sector:
2040: 0.6
2050: 1.0
district_heating_loss: 0.15
cluster_heat_buses: false # cluster residential and service heat buses to one to save memory
bev_dsm_restriction_value: 0.75 #Set to 0 for no restriction on BEV DSM
bev_dsm_restriction_time: 7 #Time at which SOC of BEV has to be dsm_restriction_value
transport_heating_deadband_upper: 20.
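For orientation, a minimal sketch of what the new flag does (bus names illustrative of the pypsa-eur-sec convention): the clustering merges per-sector heat buses by stripping the "residential " and "services " prefixes from bus names, so pairs like the following collapse into one bus.

```python
# illustrative sketch of the renaming that drives the clustering
before = ["DE0 0 residential rural heat", "DE0 0 services rural heat"]
after = {b.replace("residential ", "").replace("services ", "") for b in before}
print(sorted(after))  # ['DE0 0 rural heat']
```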
6 changes: 5 additions & 1 deletion scripts/add_existing_baseyear.py
@@ -12,7 +12,7 @@
import pypsa
import yaml

-from prepare_sector_network import prepare_costs, define_spatial
+from prepare_sector_network import prepare_costs, define_spatial, cluster_heat_buses
from helper import override_component_attrs, update_config_with_sector_opts

from types import SimpleNamespace
@@ -563,5 +563,9 @@ def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years
    add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years_heat,
                                                     ashp_cop, gshp_cop, time_dep_hp_cop, costs, default_lifetime)

    # cluster heat buses here, after the existing sub-sectoral heating capacities
    # have been attached; options.get with a default of False keeps the previous
    # behaviour when the config key is absent
    if options.get("cluster_heat_buses", False):
        cluster_heat_buses(n)

    n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))

    n.export_to_netcdf(snakemake.output[0])
105 changes: 105 additions & 0 deletions scripts/prepare_sector_network.py
@@ -19,13 +19,17 @@
from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation
from networkx.algorithms import complement
from pypsa.geo import haversine_pts
from pypsa.io import import_components_from_dataframe

import logging
logger = logging.getLogger(__name__)

from types import SimpleNamespace
spatial = SimpleNamespace()

from packaging.version import Version, parse
pd_version = parse(pd.__version__)
agg_group_kwargs = dict(numeric_only=False) if pd_version >= Version("1.3") else {}
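A note on the version gate above: on pandas >= 1.3, numeric_only=False is passed to the groupby aggregations so that non-numeric columns such as carrier or bus survive the aggregation. A minimal check, assuming only standard pandas behaviour:

```python
import pandas as pd
from packaging.version import Version, parse

agg_group_kwargs = dict(numeric_only=False) if parse(pd.__version__) >= Version("1.3") else {}

df = pd.DataFrame({"bus": ["b1", "b1"], "p_nom": [1.0, 2.0], "carrier": ["heat", "heat"]})
print(df.groupby("bus").agg("first", **agg_group_kwargs))  # the "carrier" column is kept
```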

def define_spatial(nodes, options):
"""
@@ -2593,6 +2597,98 @@ def limit_individual_line_extension(n, maxext):
    n.links.loc[hvdc, 'p_nom_max'] = n.links.loc[hvdc, 'p_nom'] + maxext


aggregate_dict = {
    "p_nom": "sum",
    "s_nom": "sum",
    "v_nom": "max",
    "v_mag_pu_max": "min",
    "v_mag_pu_min": "max",
    "p_nom_max": "sum",
    "s_nom_max": "sum",
    "p_nom_min": "sum",
    "s_nom_min": "sum",
    "v_ang_min": "max",
    "v_ang_max": "min",
    "terrain_factor": "mean",
    "num_parallel": "sum",
    "p_set": "sum",
    "e_initial": "sum",
    "e_nom": "sum",
    "e_nom_max": "sum",
    "e_nom_min": "sum",
    "state_of_charge_initial": "sum",
    "state_of_charge_set": "sum",
    "inflow": "sum",
    "p_max_pu": "first",
    "x": "mean",
    "y": "mean",
}
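To illustrate how this map is consumed further down: after the residential/services prefixes are stripped, rows that share an index label are collapsed by groupby, each attribute with its own rule. A two-row toy example (names hypothetical):

```python
import pandas as pd

loads = pd.DataFrame(
    {"p_set": [5.0, 3.0], "carrier": ["rural heat", "rural heat"]},
    index=["DE0 0 rural heat", "DE0 0 rural heat"],  # identical after prefix stripping
)
print(loads.groupby(level=0).agg({"p_set": "sum", "carrier": "first"}))
#                   p_set     carrier
# DE0 0 rural heat    8.0  rural heat
```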

def cluster_heat_buses(n):
    """Cluster residential and service heat buses to one representative bus.

    This can be done to save memory and speed up the optimisation.
    """

    def define_clustering(attributes, aggregate_dict):
        """Define how attributes should be clustered.
        Input:
            attributes    : pd.Index()
            aggregate_dict: dictionary (key: name of attribute,
                            value: clustering method)

        Returns:
            agg : clustering dictionary
        """
        keys = attributes.intersection(aggregate_dict.keys())
        # attributes without an explicit rule fall back to "first"
        agg = dict(
            zip(
                attributes.difference(keys),
                ["first"] * len(attributes.difference(keys)),
            )
        )
        for key in keys:
            agg[key] = aggregate_dict[key]
        return agg
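    # e.g. define_clustering(pd.Index(["bus0", "carrier", "p_nom"]), aggregate_dict)
    # returns {"bus0": "first", "carrier": "first", "p_nom": "sum"}: attributes
    # without an explicit rule in aggregate_dict fall back to "first"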

    logger.info("Cluster residential and service heat buses.")
    components = ["Bus", "Carrier", "Generator", "Link", "Load", "Store"]

    for c in n.iterate_components(components):
        df = c.df
        cols = df.columns[df.columns.str.contains("bus") | (df.columns == "carrier")]

        # rename columns and index: drop the "residential "/"services " prefixes
        df[cols] = df[cols].apply(
            lambda x: x.str.replace("residential ", "").str.replace("services ", ""),
            axis=1,
        )
        df = df.rename(
            index=lambda x: x.replace("residential ", "").replace("services ", "")
        )

        # cluster heat nodes
        # static dataframe
        agg = define_clustering(df.columns, aggregate_dict)
        df = df.groupby(level=0).agg(agg, **agg_group_kwargs)

        # time-varying data
        pnl = c.pnl
        agg = define_clustering(pd.Index(pnl.keys()), aggregate_dict)
        for k in pnl.keys():
            pnl[k].rename(
                columns=lambda x: x.replace("residential ", "").replace("services ", ""),
                inplace=True,
            )
            pnl[k] = pnl[k].groupby(level=0, axis=1).agg(agg[k], **agg_group_kwargs)

        # remove unclustered assets of service/residential
        to_drop = c.df.index.difference(df.index)
        n.mremove(c.name, to_drop)
        # add clustered assets
        to_add = df.index.difference(c.df.index)
        import_components_from_dataframe(n, df.loc[to_add], c.name)
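The time-varying branch above relies on column-wise groupby; a one-line illustration of duplicate columns collapsing (axis=1 in groupby works in the pandas versions this PR targets, though it is deprecated in pandas 2.x):

```python
import pandas as pd

ts = pd.DataFrame([[1.0, 2.0]], columns=["DE0 0 rural heat", "DE0 0 rural heat"])
print(ts.groupby(level=0, axis=1).agg("sum"))  # one column with value 3.0
```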


def apply_time_segmentation(n, segments, solver_name="cbc",
                            overwrite_time_dependent=True):
    """Aggregating time series to segments with different lengths
@@ -2674,6 +2770,7 @@ def set_temporal_aggregation(n, opts, solver_name):
            n = apply_time_segmentation(n, segments, solver_name=solver_name)
            break
    return n

#%%
if __name__ == "__main__":
    if 'snakemake' not in globals():
@@ -2820,5 +2917,13 @@ def set_temporal_aggregation(n, opts, solver_name):
    if options['electricity_grid_connection']:
        add_electricity_grid_connection(n, costs)

    first_year_myopic = ((snakemake.config["foresight"] == 'myopic') and
                         (snakemake.config["scenario"]["planning_horizons"][0] == investment_year))

    # in the first myopic planning horizon, clustering is deferred to
    # add_existing_baseyear.py, which needs the separate residential and
    # services heat buses to attach existing heating capacities
    if options.get("cluster_heat_buses", False) and not first_year_myopic:
        cluster_heat_buses(n)

    n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))

    n.export_to_netcdf(snakemake.output[0])
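Finally, a minimal end-to-end usage sketch (the network path is hypothetical, and sector-coupled networks normally also need the component attribute overrides from helper.py):

```python
import pypsa
from prepare_sector_network import cluster_heat_buses

n = pypsa.Network("results/prenetworks/elec_s_37_sector.nc")  # hypothetical path
print(len(n.buses))
cluster_heat_buses(n)  # modifies n in place
print(len(n.buses))    # fewer buses: residential and services heat merged
```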