diff --git a/config.default.yaml b/config.default.yaml
index ee1c5059..e165e895 100644
--- a/config.default.yaml
+++ b/config.default.yaml
@@ -160,6 +160,7 @@ sector:
     2040: 0.6
     2050: 1.0
   district_heating_loss: 0.15
+  cluster_heat_buses: false  # cluster residential and service heat buses into a single bus to save memory
   bev_dsm_restriction_value: 0.75 #Set to 0 for no restriction on BEV DSM
   bev_dsm_restriction_time: 7 #Time at which SOC of BEV has to be dsm_restriction_value
   transport_heating_deadband_upper: 20.
diff --git a/scripts/add_existing_baseyear.py b/scripts/add_existing_baseyear.py
index 8f2fdf24..8e274d62 100644
--- a/scripts/add_existing_baseyear.py
+++ b/scripts/add_existing_baseyear.py
@@ -12,7 +12,7 @@
 import pypsa
 import yaml
 
-from prepare_sector_network import prepare_costs, define_spatial
+from prepare_sector_network import prepare_costs, define_spatial, cluster_heat_buses
 from helper import override_component_attrs, update_config_with_sector_opts
 
 from types import SimpleNamespace
@@ -563,5 +563,9 @@ def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years
     add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years_heat,
                                                      ashp_cop, gshp_cop, time_dep_hp_cop, costs, default_lifetime)
 
+    if options.get("cluster_heat_buses", False):
+        cluster_heat_buses(n)
+
     n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
+
     n.export_to_netcdf(snakemake.output[0])
diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py
index b6c052be..89d774ca 100644
--- a/scripts/prepare_sector_network.py
+++ b/scripts/prepare_sector_network.py
@@ -19,6 +19,7 @@
 from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation
 from networkx.algorithms import complement
 from pypsa.geo import haversine_pts
+from pypsa.io import import_components_from_dataframe
 
 import logging
 logger = logging.getLogger(__name__)
@@ -26,6 +27,12 @@
 from types import SimpleNamespace
 
 spatial = SimpleNamespace()
 
+from packaging.version import Version, parse
+pd_version = parse(pd.__version__)
+# from pandas 1.3, numeric_only=False must be passed explicitly to keep
+# non-numeric columns when aggregating
+agg_group_kwargs = dict(numeric_only=False) if pd_version >= Version("1.3") else {}
+
 def define_spatial(nodes, options):
     """
@@ -2593,5 +2600,103 @@ def limit_individual_line_extension(n, maxext):
     n.links.loc[hvdc, 'p_nom_max'] = n.links.loc[hvdc, 'p_nom'] + maxext
 
 
+aggregate_dict = {
+    "p_nom": "sum",
+    "s_nom": "sum",
+    "v_nom": "max",
+    "v_mag_pu_max": "min",
+    "v_mag_pu_min": "max",
+    "p_nom_max": "sum",
+    "s_nom_max": "sum",
+    "p_nom_min": "sum",
+    "s_nom_min": "sum",
+    "v_ang_min": "max",
+    "v_ang_max": "min",
+    "terrain_factor": "mean",
+    "num_parallel": "sum",
+    "p_set": "sum",
+    "e_initial": "sum",
+    "e_nom": "sum",
+    "e_nom_max": "sum",
+    "e_nom_min": "sum",
+    "state_of_charge_initial": "sum",
+    "state_of_charge_set": "sum",
+    "inflow": "sum",
+    "p_max_pu": "first",
+    "x": "mean",
+    "y": "mean",
+}
+
+
+def cluster_heat_buses(n):
+    """Cluster residential and service heat buses into a single
+    representative bus to save memory and speed up the optimisation.
+    """
+
+    def define_clustering(attributes, aggregate_dict):
+        """Define how attributes should be clustered.
+
+        Input:
+            attributes    : pd.Index of attribute names
+            aggregate_dict: dictionary (key: attribute name,
+                            value: clustering method)
+
+        Returns:
+            agg : clustering dictionary
+        """
+        keys = attributes.intersection(aggregate_dict.keys())
+        # default to "first" for attributes without an entry in aggregate_dict
+        agg = dict(
+            zip(
+                attributes.difference(keys),
+                ["first"] * len(attributes.difference(keys)),
+            )
+        )
+        for key in keys:
+            agg[key] = aggregate_dict[key]
+        return agg
+
+    logger.info("Clustering residential and service heat buses.")
+    components = ["Bus", "Carrier", "Generator", "Link", "Load", "Store"]
+
+    for c in n.iterate_components(components):
+        df = c.df
+        cols = df.columns[df.columns.str.contains("bus") | (df.columns == "carrier")]
+
+        # strip the residential/services prefixes from columns and index
+        df[cols] = df[cols].apply(
+            lambda x: x.str.replace("residential ", "").str.replace("services ", ""),
+            axis=1,
+        )
+        df = df.rename(
+            index=lambda x: x.replace("residential ", "").replace("services ", "")
+        )
+
+        # cluster heat nodes
+        # static dataframe
+        agg = define_clustering(df.columns, aggregate_dict)
+        df = df.groupby(level=0).agg(agg, **agg_group_kwargs)
+        # time-varying data
+        pnl = c.pnl
+        agg = define_clustering(pd.Index(pnl.keys()), aggregate_dict)
+        for k in pnl.keys():
+            pnl[k].rename(
+                columns=lambda x: x.replace("residential ", "").replace("services ", ""),
+                inplace=True,
+            )
+            pnl[k] = (
+                pnl[k]
+                .groupby(level=0, axis=1)
+                .agg(agg[k], **agg_group_kwargs)
+            )
+
+        # remove unclustered residential/service assets
+        to_drop = c.df.index.difference(df.index)
+        n.mremove(c.name, to_drop)
+        # add clustered assets
+        to_add = df.index.difference(c.df.index)
+        import_components_from_dataframe(n, df.loc[to_add], c.name)
+
+
 def apply_time_segmentation(n, segments, solver_name="cbc", overwrite_time_dependent=True):
     """Aggregating time series to segments with different lengths
@@ -2674,6 +2779,7 @@ def set_temporal_aggregation(n, opts, solver_name):
             n = apply_time_segmentation(n, segments, solver_name=solver_name)
             break
     return n
+
 #%%
 if __name__ == "__main__":
     if 'snakemake' not in globals():
@@ -2820,5 +2926,14 @@ def set_temporal_aggregation(n, opts, solver_name):
     if options['electricity_grid_connection']:
         add_electricity_grid_connection(n, costs)
 
+    # in the first myopic planning horizon, clustering is instead done in
+    # add_existing_baseyear.py, after the existing capacities have been added
+    first_year_myopic = ((snakemake.config["foresight"] == 'myopic') and
+                         (snakemake.config["scenario"]["planning_horizons"][0] == investment_year))
+
+    if options.get("cluster_heat_buses", False) and not first_year_myopic:
+        cluster_heat_buses(n)
+
     n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
+
     n.export_to_netcdf(snakemake.output[0])
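
Note on the clustering mechanics: cluster_heat_buses renames components so that the
residential and services variants collapse onto the same name, then aggregates the
resulting duplicates according to aggregate_dict. A minimal, self-contained sketch of
that rename-then-aggregate step using only pandas (the bus names and p_set values are
illustrative, not taken from the patch):

    import pandas as pd

    # two loads that differ only in the residential/services prefix
    loads = pd.DataFrame(
        {
            "bus": ["DE0 0 residential rural heat", "DE0 0 services rural heat"],
            "carrier": ["residential rural heat", "services rural heat"],
            "p_set": [1.0, 2.0],
        },
        index=["DE0 0 residential rural heat", "DE0 0 services rural heat"],
    )

    # strip the prefixes so both rows collapse onto the same name
    strip = lambda s: s.replace("residential ", "").replace("services ", "")
    loads[["bus", "carrier"]] = loads[["bus", "carrier"]].apply(lambda col: col.map(strip))
    loads = loads.rename(index=strip)

    # aggregate duplicates: p_set sums (per aggregate_dict), the rest takes "first"
    clustered = loads.groupby(level=0).agg(
        {"bus": "first", "carrier": "first", "p_set": "sum"}
    )
    assert clustered.loc["DE0 0 rural heat", "p_set"] == 3.0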
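
Note on re-attaching the clustered components: the patch uses
pypsa.io.import_components_from_dataframe to bulk-add the aggregated rows back into the
network. A small usage sketch, assuming pypsa is installed (the bus name and carrier
below are made up):

    import pandas as pd
    import pypsa
    from pypsa.io import import_components_from_dataframe

    n = pypsa.Network()
    buses = pd.DataFrame({"carrier": ["rural heat"]}, index=["DE0 0 rural heat"])
    # same call pattern as in cluster_heat_buses: (network, static dataframe, component name)
    import_components_from_dataframe(n, buses, "Bus")
    print(n.buses)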