Merge pull request #236 from PyPSA/transport
separate rules for transport demand and nodal energy totals
lisazeyen authored May 2, 2022
2 parents cdd5628 + dc997c6 commit f4e1d28
Showing 5 changed files with 348 additions and 213 deletions.
36 changes: 33 additions & 3 deletions Snakefile
@@ -431,15 +431,45 @@ else:
    build_retro_cost_output = {}


rule build_population_weighted_energy_totals:
    input:
        energy_totals='resources/energy_totals.csv',
        clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv"
    output: "resources/pop_weighted_energy_totals_s{simpl}_{clusters}.csv"
    threads: 1
    resources: mem_mb=2000
    script: "scripts/build_population_weighted_energy_totals.py"


rule build_transport_demand:
    input:
        clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv",
        pop_weighted_energy_totals="resources/pop_weighted_energy_totals_s{simpl}_{clusters}.csv",
        transport_data='resources/transport_data.csv',
        traffic_data_KFZ="data/emobility/KFZ__count",
        traffic_data_Pkw="data/emobility/Pkw__count",
        temp_air_total="resources/temp_air_total_elec_s{simpl}_{clusters}.nc",
    output:
        transport_demand="resources/transport_demand_s{simpl}_{clusters}.csv",
        transport_data="resources/transport_data_s{simpl}_{clusters}.csv",
        avail_profile="resources/avail_profile_s{simpl}_{clusters}.csv",
        dsm_profile="resources/dsm_profile_s{simpl}_{clusters}.csv"
    threads: 1
    resources: mem_mb=2000
    script: "scripts/build_transport_demand.py"


rule prepare_sector_network:
    input:
        overrides="data/override_component_attrs",
        network=pypsaeur('networks/elec_s{simpl}_{clusters}_ec_lv{lv}_{opts}.nc'),
        energy_totals_name='resources/energy_totals.csv',
        pop_weighted_energy_totals="resources/pop_weighted_energy_totals_s{simpl}_{clusters}.csv",
        transport_demand="resources/transport_demand_s{simpl}_{clusters}.csv",
        transport_data="resources/transport_data_s{simpl}_{clusters}.csv",
        avail_profile="resources/avail_profile_s{simpl}_{clusters}.csv",
        dsm_profile="resources/dsm_profile_s{simpl}_{clusters}.csv",
        co2_totals_name='resources/co2_totals.csv',
        transport_name='resources/transport_data.csv',
        traffic_data_KFZ="data/emobility/KFZ__count",
        traffic_data_Pkw="data/emobility/Pkw__count",
        biomass_potentials='resources/biomass_potentials_s{simpl}_{clusters}.csv',
        heat_profile="data/heat_load_profile_BDEW.csv",
        costs=CDIR + "costs_{planning_horizons}.csv",
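With the wildcard values used by the scripts' mock_snakemake calls (simpl='', clusters=48), the new outputs can be requested directly by file name, e.g. snakemake -j 1 resources/transport_demand_s_48.csv (a sketch of a standard invocation; adjust wildcards to your config). Snakemake then schedules build_population_weighted_energy_totals and build_transport_demand ahead of prepare_sector_network, which now consumes their CSV outputs.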
22 changes: 22 additions & 0 deletions scripts/build_population_weighted_energy_totals.py
@@ -0,0 +1,22 @@
"""Build population-weighted energy totals."""

import pandas as pd

if __name__ == '__main__':
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake(
            'build_population_weighted_energy_totals',
            simpl='',
            clusters=48,
        )

    pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0)

    energy_totals = pd.read_csv(snakemake.input.energy_totals, index_col=0)

    # broadcast each country's totals to its nodes, then scale each node's
    # row by its share of the national population
    nodal_energy_totals = energy_totals.loc[pop_layout.ct].fillna(0.)
    nodal_energy_totals.index = pop_layout.index
    nodal_energy_totals = nodal_energy_totals.multiply(pop_layout.fraction, axis=0)

    nodal_energy_totals.to_csv(snakemake.output[0])
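To see what the weighting does, a minimal self-contained sketch with hypothetical numbers (the column names mirror the real inputs):

import pandas as pd

# two German nodes holding 60 %/40 % of the national population, one Danish node
pop_layout = pd.DataFrame(
    {"ct": ["DE", "DE", "DK"], "fraction": [0.6, 0.4, 1.0]},
    index=["DE0 0", "DE0 1", "DK0 0"],
)
energy_totals = pd.DataFrame({"total road": [500.0, 30.0]}, index=["DE", "DK"])

# same three steps as above: broadcast country rows to nodes, re-index, scale
nodal = energy_totals.loc[pop_layout.ct].fillna(0.)
nodal.index = pop_layout.index
nodal = nodal.multiply(pop_layout.fraction, axis=0)

print(nodal["total road"].tolist())  # [300.0, 200.0, 30.0]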
201 changes: 201 additions & 0 deletions scripts/build_transport_demand.py
@@ -0,0 +1,201 @@
"""Build transport demand."""

import pandas as pd
import numpy as np
import xarray as xr
from helper import generate_periodic_profiles


def build_nodal_transport_data(fn, pop_layout):

    transport_data = pd.read_csv(fn, index_col=0)

    nodal_transport_data = transport_data.loc[pop_layout.ct].fillna(0.0)
    nodal_transport_data.index = pop_layout.index
    nodal_transport_data["number cars"] = (
        pop_layout["fraction"] * nodal_transport_data["number cars"]
    )
    # fall back to the fleet-wide mean where no efficiency data is available
    nodal_transport_data.loc[
        nodal_transport_data["average fuel efficiency"] == 0.0,
        "average fuel efficiency",
    ] = transport_data["average fuel efficiency"].mean()

    return nodal_transport_data


def build_transport_demand(traffic_fn, airtemp_fn, nodes, nodal_transport_data):

    ## Get overall demand curve for all vehicles

    traffic = pd.read_csv(
        traffic_fn, skiprows=2, usecols=["count"], squeeze=True
    )

    transport_shape = generate_periodic_profiles(
        dt_index=snapshots,
        nodes=nodes,
        weekly_profile=traffic.values,
    )
    transport_shape = transport_shape / transport_shape.sum()

    # electric motors are more efficient, so alter transport demand

    plug_to_wheels_eta = options["bev_plug_to_wheel_efficiency"]
    battery_to_wheels_eta = plug_to_wheels_eta * options["bev_charge_efficiency"]

    efficiency_gain = (
        nodal_transport_data["average fuel efficiency"] / battery_to_wheels_eta
    )

    # get heating demand for correction to demand time series
    temperature = xr.open_dataarray(airtemp_fn).to_pandas()

    # correction factors for vehicle heating
    dd_ICE = transport_degree_factor(
        temperature,
        options["transport_heating_deadband_lower"],
        options["transport_heating_deadband_upper"],
        options["ICE_lower_degree_factor"],
        options["ICE_upper_degree_factor"],
    )

    dd_EV = transport_degree_factor(
        temperature,
        options["transport_heating_deadband_lower"],
        options["transport_heating_deadband_upper"],
        options["EV_lower_degree_factor"],
        options["EV_upper_degree_factor"],
    )

    # divide out the heating/cooling demand from ICE totals
    # and multiply back in the heating/cooling demand for EVs
    ice_correction = (transport_shape * (1 + dd_ICE)).sum() / transport_shape.sum()

    energy_totals_transport = (
        pop_weighted_energy_totals["total road"]
        + pop_weighted_energy_totals["total rail"]
        - pop_weighted_energy_totals["electricity rail"]
    )

    transport = (
        (transport_shape.multiply(energy_totals_transport) * 1e6 * Nyears)
        .divide(efficiency_gain * ice_correction)
        .multiply(1 + dd_EV)
    )

    return transport


def transport_degree_factor(
    temperature,
    deadband_lower=15,
    deadband_upper=20,
    lower_degree_factor=0.5,
    upper_degree_factor=1.6,
):
    """
    Work out how much energy demand in vehicles increases due to heating
    and cooling. There is a deadband where there is no increase. Degree
    factors are the percentage increase in demand per degree outside the
    deadband, relative to consumption without heating/cooling. Returns the
    per-unit increase in demand for each place and time.
    """

    dd = temperature.copy()

    dd[(temperature > deadband_lower) & (temperature < deadband_upper)] = 0.0

    dT_lower = deadband_lower - temperature[temperature < deadband_lower]
    dd[temperature < deadband_lower] = lower_degree_factor / 100 * dT_lower

    dT_upper = temperature[temperature > deadband_upper] - deadband_upper
    dd[temperature > deadband_upper] = upper_degree_factor / 100 * dT_upper

    return dd


def bev_availability_profile(fn, snapshots, nodes, options):
    """
    Derive plugged-in availability for passenger electric vehicles.
    """

    traffic = pd.read_csv(fn, skiprows=2, usecols=["count"], squeeze=True)

    avail_max = options["bev_avail_max"]
    avail_mean = options["bev_avail_mean"]

    # scale linearly with traffic: availability equals avail_max at the
    # weekly traffic minimum and avail_mean at the weekly traffic mean
    avail = avail_max - (avail_max - avail_mean) * (traffic - traffic.min()) / (
        traffic.mean() - traffic.min()
    )

    avail_profile = generate_periodic_profiles(
        dt_index=snapshots,
        nodes=nodes,
        weekly_profile=avail.values,
    )

    return avail_profile


def bev_dsm_profile(snapshots, nodes, options):

    dsm_week = np.zeros((24 * 7,))

    # require a minimum state of charge at the same hour of every day
    dsm_week[(np.arange(0, 7, 1) * 24 + options["bev_dsm_restriction_time"])] = options[
        "bev_dsm_restriction_value"
    ]

    dsm_profile = generate_periodic_profiles(
        dt_index=snapshots,
        nodes=nodes,
        weekly_profile=dsm_week,
    )

    return dsm_profile


if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake

        snakemake = mock_snakemake(
            "build_transport_demand",
            simpl="",
            clusters=48,
        )

    pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0)

    nodes = pop_layout.index

    pop_weighted_energy_totals = pd.read_csv(
        snakemake.input.pop_weighted_energy_totals, index_col=0
    )

    options = snakemake.config["sector"]

    snapshots = pd.date_range(freq='h', **snakemake.config["snapshots"], tz="UTC")

    Nyears = 1

    nodal_transport_data = build_nodal_transport_data(
        snakemake.input.transport_data,
        pop_layout
    )

    transport_demand = build_transport_demand(
        snakemake.input.traffic_data_KFZ,
        snakemake.input.temp_air_total,
        nodes, nodal_transport_data
    )

    avail_profile = bev_availability_profile(
        snakemake.input.traffic_data_Pkw,
        snapshots, nodes, options
    )

    dsm_profile = bev_dsm_profile(snapshots, nodes, options)

    nodal_transport_data.to_csv(snakemake.output.transport_data)
    transport_demand.to_csv(snakemake.output.transport_demand)
    avail_profile.to_csv(snakemake.output.avail_profile)
    dsm_profile.to_csv(snakemake.output.dsm_profile)
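As a quick numerical check of transport_degree_factor with its default parameters (deadband 15-20 °C, 0.5 %/°C below, 1.6 %/°C above), a standalone sketch that reproduces the function's logic:

import pandas as pd

def transport_degree_factor(temperature, deadband_lower=15, deadband_upper=20,
                            lower_degree_factor=0.5, upper_degree_factor=1.6):
    # same logic as in build_transport_demand.py, repeated here so the
    # snippet runs on its own
    dd = temperature.copy()
    dd[(temperature > deadband_lower) & (temperature < deadband_upper)] = 0.0
    dT_lower = deadband_lower - temperature[temperature < deadband_lower]
    dd[temperature < deadband_lower] = lower_degree_factor / 100 * dT_lower
    dT_upper = temperature[temperature > deadband_upper] - deadband_upper
    dd[temperature > deadband_upper] = upper_degree_factor / 100 * dT_upper
    return dd

temp = pd.Series([-5.0, 17.0, 25.0])
print(transport_degree_factor(temp).round(3).tolist())
# [0.1, 0.0, 0.08]: 20 K below the deadband adds 10 % to fuel demand,
# temperatures inside the deadband add nothing, 5 K above adds 8 %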
24 changes: 23 additions & 1 deletion scripts/helper.py
@@ -1,4 +1,5 @@
import os
import pytz
import pandas as pd
from pathlib import Path
from pypsa.descriptors import Dict
@@ -100,4 +101,25 @@ def progress_retrieve(url, file):
    def dlProgress(count, blockSize, totalSize):
        pbar.update(int(count * blockSize * 100 / totalSize))

    urllib.request.urlretrieve(url, file, reporthook=dlProgress)


def generate_periodic_profiles(dt_index, nodes, weekly_profile, localize=None):
    """
    Given a 24*7-long list of hourly values for one week, tile it for each
    node over the period dt_index, taking account of time zones and summer
    time.
    """

    weekly_profile = pd.Series(weekly_profile, range(24 * 7))

    week_df = pd.DataFrame(index=dt_index, columns=nodes)

    for node in nodes:
        timezone = pytz.timezone(pytz.country_timezones[node[:2]][0])
        tz_dt_index = dt_index.tz_convert(timezone)
        week_df[node] = [24 * dt.weekday() + dt.hour for dt in tz_dt_index]
        week_df[node] = week_df[node].map(weekly_profile)

    week_df = week_df.tz_localize(localize)

    return week_df
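A small usage sketch of the new helper (values hypothetical; node names follow the convention that the first two characters are an ISO country code):

import numpy as np
import pandas as pd
from helper import generate_periodic_profiles  # defined above

snapshots = pd.date_range("2013-01-01", "2013-01-07 23:00", freq="h", tz="UTC")

# weekly profile in local time: 1.0 between 08:00 and 19:00, 0.2 otherwise
weekly = np.full(24 * 7, 0.2)
for day in range(7):
    weekly[day * 24 + 8:day * 24 + 20] = 1.0

profile = generate_periodic_profiles(snapshots, ["DE0 0", "GB0 0"], weekly)
# Berlin is UTC+1 in winter and London UTC+0, so in UTC terms the German
# column switches to the daytime value one hour before the British one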