Skip to content

Commit

Permalink
Merge pull request #54 from sedos-project/feature/adapt_tests
Browse files Browse the repository at this point in the history
Special parameter mapper and Test Goal improvements
  • Loading branch information
FelixMau authored Nov 8, 2023
2 parents 77383ad + f82c7ee commit d5feec3
Show file tree
Hide file tree
Showing 20 changed files with 53,946 additions and 560 deletions.
4 changes: 4 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -3,3 +3,7 @@ collections/*
tests/collections/
__pycache__/
/structures/
/tests/_files/build_datapackage_test/
/tests/_files/tabular_datapackage_hack_a_thon/data/
/tests/_files/tabular_datapackage_hack_a_thon/datapackage.json
/tests/_files/tabular_datapackage_mininmal_example/
6 changes: 5 additions & 1 deletion data_adapter_oemof/adapters.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@ class Adapter:
facade: Facade = None
extra_fields = (
Field(name="name", type=str),
Field(name="type", type=str),
Field(name="region", type=str),
Field(name="year", type=int),
)
Expand Down Expand Up @@ -132,6 +131,11 @@ class StorageAdapter(Adapter):

type = "storage"
facade = facades.Storage
extra_fields = Adapter.extra_fields + (
Field(name="invest_relation_output_capacity", type=float),
Field(name="inflow_conversion_factor", type=float),
Field(name="outflow_conversion_factor", type=float),
)


class ExtractionTurbineAdapter(Adapter):
Expand Down
67 changes: 53 additions & 14 deletions data_adapter_oemof/build_datapackage.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import dataclasses
import os
import warnings
from typing import Optional, Union
from typing import Optional

import pandas as pd
from data_adapter import core
Expand Down Expand Up @@ -89,14 +89,29 @@ def _listify_to_periodic(group_df) -> pd.Series:
group_df
Returns
-------
----------
pd.Series
Examples
----------
| region | year | invest_relation_output_capacity | fixed_costs |
|---------:|-------:|----------------------------------:|--------------:|
| BB | 2016 | 3.3 | 1 |
| BB | 2030 | 3.3 | 2 |
| BB | 2050 | 3.3 | 3 |
->
| type | fixed_costs| name | region | year | invest_relation_output_capacity |
|:--------- |-----------:|:------ |:---------|:---------------:|---:|
| storage | [1, 2, 3] | BB_Lithium_storage_battery | BB |[2016, 2030, 2050]|3.3 |
"""

if "year" not in group_df.columns:
return group_df

unique_values = pd.Series()
unique_values = pd.Series(dtype=object)
for col in group_df.columns: # Exclude 'name' column
if isinstance(group_df[col][group_df.index[0]], dict):
# Unique input/output parameters are not allowed per period
Expand All @@ -105,10 +120,7 @@ def _listify_to_periodic(group_df) -> pd.Series:
# Lists and Series can be passed for special Facades only.
# Sequences shall be passed as sequences (via links.csv):
elif any(
[
isinstance(col_entry, Union[list, pd.Series])
for col_entry in group_df[col]
]
[isinstance(col_entry, (pd.Series, list)) for col_entry in group_df[col]]
):
values = group_df[col].explode().unique()
else:
Expand Down Expand Up @@ -235,6 +247,7 @@ def get_periods_from_parametrized_sequences(
sequence["periods"] = sequence.groupby(sequence.index.year).ngroup()
# TODO timeincrement might be adjusted later to modify objective weighting
sequence["timeincrement"] = 1
sequence.index.name = "timeindex"
return sequence
else:
pass
Expand Down Expand Up @@ -309,8 +322,17 @@ def save_datapackage_to_csv(self, destination: str) -> None:
resource["schema"].update({"foreignKeys": []})
if "name" in field_names:
resource["schema"].update({"primaryKey": "name"})

elif (
"sequence" in resource["name"].split("_")
or resource["name"] == "periods"
):
pass
else:
warnings.warn("Primary keys differing from `name` not implemented yet")
warnings.warn(
    "Primary keys differing from `name` not implemented yet. "
    f"Check primary keys for resource {resource['name']}"
)

# re-initialize Package with added foreign keys and save datapackage.json
Package(package.descriptor).save(os.path.join(destination, "datapackage.json"))
Expand All @@ -327,8 +349,22 @@ def yearly_scalars_to_periodic_values(scalar_dataframe) -> None:
Then iterates over every parametrized element, groups the entries by name,
and applies the aggregation method.
Returns None
-------
Periodically changing values are aggregated into a list with as many
entries as there are periods, while values that do not change across
periods are kept as single scalars.
Only values that are allowed to vary per period should change, and
identifiers must be unique.
Examples:
| region | year | invest_relation_output_capacity | fixed_costs |
|---------:|-------:|----------------------------------:|--------------:|
| BB | 2016 | 3.3 | 1 |
| BB | 2030 | 3.3 | 2 |
| BB | 2050 | 3.3 | 3 |
Returns:
| type | fixed_costs| name | region | year | invest_relation_output_capacity |
|:--------- |-----------:|:------ |:---------|:---------------:|---:|
| storage | [1, 2, 3] | BB_Lithium_storage_battery | BB |[2016, 2030, 2050]|3.3 |
"""
identifiers = ["region", "carrier", "tech"]
Expand Down Expand Up @@ -362,14 +398,17 @@ def build_datapackage(
Parameters
----------
adapter: Adapter
Adapter from oemof_data_adapter that is able to handle parameter model data
from Databus. Adapter needs to be initialized with `structure_name`. Use `links_
Adapter from data_adapter that is able to handle parameter model data
from Databus. Adapter needs to be initialized with `structure_name`.
Use `links` to add data from different processes to each other.
Use `structure` to map busses to "processes" and "Adapters"
process_adapter_map
Maps process names to adapter names, if not set default mapping is used
parameter_map
Maps parameter names from adapter to facade, if not set default mapping is used
Maps parameter names from adapter to facade, if not set default mapping is used.
Make sure to map "sequence" entries on "sequence profile names" (see example)
bus_map
Maps facade busses to adapter busses, if not set default mapping is used
Maps facade bus names to adapter bus names, if not set default mapping is used
Returns
-------
Expand Down
11 changes: 7 additions & 4 deletions data_adapter_oemof/mappings.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ def map_key(self, key):
return self.mapping["DEFAULT"][key]

# 5. Use key if no mapping available
logger.warning(f"Key not found. Did not map '{key}'")
logger.debug(f"Key not found. Did not map '{key}'")
return key

def get_data(self, key, field_type: Optional[Type] = None):
Expand All @@ -108,7 +108,7 @@ def get_data(self, key, field_type: Optional[Type] = None):
if key in self.timeseries.columns:
return key
# 1.2.2 Take column name if only one time series is available
if len(self.timeseries) == 1:
if len(self.timeseries.columns) == 1:
timeseries_key = self.timeseries.columns[0]
logger.info(
"Key not found in timeseries. "
Expand All @@ -123,7 +123,9 @@ def get_data(self, key, field_type: Optional[Type] = None):
return DEFAULT_MAPPING[key]

# 3 Return None if no data is available
logger.warning(f"Could not get data for mapped key '{key}'")
logger.debug(
f"No {key} data in {self.process_name} as a {self.adapter.__name__}"
)
return None

def get(self, key, field_type: Optional[Type] = None):
Expand Down Expand Up @@ -184,7 +186,8 @@ def get_busses(self, struct):
else:
warnings.warn(
"Please check structure and provide either one set of inputs/outputs "
"or specify as default Parameter specific busses not implemented yet"
"or specify as default Parameter specific busses not implemented yet. "
f"No Bus found for Process {self.process_name} in Adapter {self.adapter}"
)

# 2. Check for default busses
Expand Down
Loading

0 comments on commit d5feec3

Please sign in to comment.