8,760 changes: 8,760 additions & 0 deletions examples/curve_examples/agriculture_electricity_curve.csv

Large diffs are not rendered by default.

8,760 changes: 8,760 additions & 0 deletions examples/curve_examples/electric_vehicle_profile_5_curve.csv

Large diffs are not rendered by default.

8,760 changes: 8,760 additions & 0 deletions examples/curve_examples/interconnector_8_price_curve.csv

Large diffs are not rendered by default.

196 changes: 196 additions & 0 deletions examples/excel_to_scenario.ipynb
@@ -0,0 +1,196 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "42081dd2",
"metadata": {},
"source": [
"This is still a testing workbook to demonstrate progress on the excel to scenario flows."
]
},
{
"cell_type": "markdown",
"id": "386ce0b0",
"metadata": {},
"source": [
"# Curves"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "c617dc0a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Found 3 curve files\n",
"✓ Loaded curve 'interconnector_8_price': 8760 values\n",
"✓ Loaded curve 'electric_vehicle_profile_5': 8760 values\n",
"✓ Loaded curve 'agriculture_electricity': 8760 values\n",
"Created DataFrame with 3 curves and 8760 rows\n"
]
}
],
"source": [
"# For now a custom csv to pd df function - this will be handled by the reverse packer in the end\n",
"\n",
"import pandas as pd\n",
"from pathlib import Path\n",
"from typing import Union\n",
"\n",
"def read_curves_to_dataframe(\n",
" curves_path: Union[str, Path],\n",
" pattern: str = \"*.csv\",\n",
" validate_length: bool = True\n",
") -> pd.DataFrame:\n",
" \"\"\"\n",
" Read multiple curve CSV files into a single DataFrame.\n",
"\n",
" Args:\n",
" curves_path: Directory path containing the curve CSV files\n",
" pattern: File pattern to match (default: \"*.csv\")\n",
" validate_length: Whether to validate each curve has exactly 8760 values\n",
"\n",
" Returns:\n",
" DataFrame with curves as columns, where column names are the curve keys\n",
" (derived from filenames without extension)\n",
"\n",
" Raises:\n",
" ValueError: If validation fails or files have issues\n",
" FileNotFoundError: If no files found matching the pattern\n",
" \"\"\"\n",
" curves_path = Path(curves_path)\n",
"\n",
" if not curves_path.exists():\n",
" raise FileNotFoundError(f\"Directory not found: {curves_path}\")\n",
"\n",
" # Find all CSV files matching the pattern\n",
" csv_files = list(curves_path.glob(pattern))\n",
"\n",
" if not csv_files:\n",
" raise FileNotFoundError(f\"No files found matching pattern '{pattern}' in {curves_path}\")\n",
"\n",
" print(f\"Found {len(csv_files)} curve files\")\n",
"\n",
" curves_data = {}\n",
" errors = []\n",
"\n",
" for csv_file in csv_files:\n",
" # Use filename (without extension) as curve key, remove _curve suffix if present\n",
" curve_key = csv_file.stem\n",
" if curve_key.endswith('_curve'):\n",
" curve_key = curve_key[:-6] # Remove '_curve' suffix\n",
"\n",
" try:\n",
" # Read CSV file - assuming single column of values, no headers\n",
" curve_data = pd.read_csv(\n",
" csv_file,\n",
" header=None, # No header row\n",
" index_col=False, # No index column\n",
" dtype=float # All values should be numeric\n",
" )\n",
"\n",
" # Convert DataFrame to Series if single column\n",
" if isinstance(curve_data, pd.DataFrame):\n",
" if len(curve_data.columns) == 1:\n",
" curve_data = curve_data.iloc[:, 0]\n",
" else:\n",
" errors.append(f\"{curve_key}: Expected 1 column, found {len(curve_data.columns)}\")\n",
" continue\n",
"\n",
" # Drop any NaN values\n",
" curve_data = curve_data.dropna()\n",
"\n",
" # Validate length if requested\n",
" if validate_length and len(curve_data) != 8760:\n",
" errors.append(f\"{curve_key}: Expected 8760 values, found {len(curve_data)}\")\n",
" continue\n",
"\n",
" # Store with curve key as column name\n",
" curves_data[curve_key] = curve_data.values\n",
" print(f\"✓ Loaded curve '{curve_key}': {len(curve_data)} values\")\n",
"\n",
" except Exception as e:\n",
" errors.append(f\"{curve_key}: Error reading file - {str(e)}\")\n",
" continue\n",
"\n",
" if errors:\n",
" error_msg = \"Errors reading curve files:\\n\" + \"\\n\".join(f\" - {err}\" for err in errors)\n",
" if not curves_data: # No curves loaded successfully\n",
" raise ValueError(error_msg)\n",
" else:\n",
" print(f\"Warning: Some curves failed to load:\\n{error_msg}\")\n",
"\n",
" if not curves_data:\n",
" raise ValueError(\"No curves were successfully loaded\")\n",
"\n",
" # Create DataFrame from the curves\n",
" df = pd.DataFrame(curves_data)\n",
"\n",
" # Set index to represent hours (0-8759 for a full year)\n",
" df.index.name = \"hour\"\n",
"\n",
" print(f\"Created DataFrame with {len(df.columns)} curves and {len(df)} rows\")\n",
" return df\n",
"\n",
"# User uploads Excel/CSV → DataFrame → CustomCurves object\n",
"from pyetm.models.custom_curves import CustomCurves\n",
"\n",
"df = read_curves_to_dataframe(\"curve_examples/\")\n",
"custom_curves = CustomCurves._from_dataframe(df)"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "e75a03b2",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Environment setup complete\n",
" Using ETM API at http://localhost:3000/api/v3\n",
" Token loaded? True\n",
"API connection ready\n"
]
}
],
"source": [
"from example_helpers import setup_notebook\n",
"from pyetm.models import Scenario\n",
"\n",
"setup_notebook()\n",
"scenario = Scenario.load(2690288)\n",
"\n",
"# Update curves on scenario\n",
"scenario.update_custom_curves(custom_curves)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "pyetm-qKH2ozgc",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
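A minimal sketch of the same curve-loading step routed through the public constructor added to Base later in this PR, assuming CustomCurves subclasses Base (as the notebook's call to the private _from_dataframe suggests) and that the notebook's read_curves_to_dataframe helper is in scope; unlike _from_dataframe, the public from_dataframe converts any failure into a warning on the returned instance instead of raising:

from pyetm.models.custom_curves import CustomCurves

# read_curves_to_dataframe is the helper defined in the notebook cell above
df = read_curves_to_dataframe("curve_examples/")

# Public wrapper: on failure it returns a bare model_construct() instance
# carrying the error as a warning, rather than raising an exception.
custom_curves = CustomCurves.from_dataframe(df)
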
38 changes: 29 additions & 9 deletions src/pyetm/models/base.py
@@ -20,7 +20,6 @@ class Base(BaseModel):

# Enable assignment validation
model_config = ConfigDict(validate_assignment=True)

_warning_collector: WarningCollector = PrivateAttr(default_factory=WarningCollector)

def __init__(self, **data: Any) -> None:
@@ -52,9 +51,15 @@ def __setattr__(self, name: str, value: Any) -> None:
Handle assignment with validation error capture.
Simplified from the original complex implementation.
"""
# Skip validation for private attributes
if name.startswith("_") or name not in self.__class__.model_fields:
super().__setattr__(name, value)
# Skip validation for private attributes, non-model fields, callables, or attributes already defined on the class
if (
name.startswith("_")
or name not in self.__class__.model_fields
or callable(value)
or hasattr(self.__class__, name)
):
# Use object.__setattr__ to bypass Pydantic for these cases
object.__setattr__(self, name, value)
return

# Clear existing warnings for this field
@@ -106,17 +111,32 @@ def _clear_warnings_for_attr(self, field: str) -> None:
def _merge_submodel_warnings(self, *submodels: Base, key_attr: str = None) -> None:
"""
Merge warnings from nested Base models.
Maintains compatibility with existing code while using the new system.
"""
self._warning_collector.merge_submodel_warnings(*submodels, key_attr=key_attr)

@classmethod
def load_safe(cls: Type[T], **data: Any) -> T:
def from_dataframe(cls: Type[T], df: pd.DataFrame, **kwargs) -> T:
"""
Alternate constructor that always returns an instance,
converting all validation errors into warnings.
Create an instance from a pandas DataFrame.
"""
return cls(**data)
try:
return cls._from_dataframe(df, **kwargs)
except Exception as e:
# Create a fallback instance with warnings
instance = cls.model_construct()
instance.add_warning(
"from_dataframe", f"Failed to create from DataFrame: {e}"
)
return instance

@classmethod
def _from_dataframe(cls, df: pd.DataFrame, **kwargs):
"""
Private method to be implemented by each subclass for specific deserialization logic.
"""
raise NotImplementedError(
f"{cls.__name__} must implement _from_dataframe() class method"
)

def _get_serializable_fields(self) -> List[str]:
"""
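For reference, a minimal sketch of the subclass contract the new from_dataframe / _from_dataframe classmethods establish; HourlyProfile and its field are hypothetical and only illustrate the pattern, not an actual pyetm model:

import pandas as pd
from pyetm.models.base import Base


class HourlyProfile(Base):
    name: str = "unnamed"

    @classmethod
    def _from_dataframe(cls, df: pd.DataFrame, **kwargs) -> "HourlyProfile":
        # Each subclass owns its deserialization logic; Base.from_dataframe only wraps it.
        if df.empty:
            raise ValueError("DataFrame is empty")
        return cls(name=str(df.columns[0]), **kwargs)


# from_dataframe never raises: on failure it returns a bare model_construct() instance
# with the error recorded as a warning under the "from_dataframe" key.
profile = HourlyProfile.from_dataframe(pd.DataFrame({"wind": range(8760)}))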