diff --git a/power_grid_model_c/power_grid_model/include/power_grid_model/auxiliary/dataset.hpp b/power_grid_model_c/power_grid_model/include/power_grid_model/auxiliary/dataset.hpp index 3108f6307c..b759f40bcd 100644 --- a/power_grid_model_c/power_grid_model/include/power_grid_model/auxiliary/dataset.hpp +++ b/power_grid_model_c/power_grid_model/include/power_grid_model/auxiliary/dataset.hpp @@ -287,6 +287,9 @@ template class Dataset { } constexpr bool is_dense(Idx const i) const { return is_dense(buffers_[i]); } constexpr bool is_dense(Buffer const& buffer) const { return buffer.indptr.empty(); } + constexpr bool is_dense() const { + return std::ranges::all_of(buffers_, [this](Buffer const& buffer) { return is_dense(buffer); }); + } constexpr bool is_sparse(std::string_view component, bool with_attribute_buffers = false) const { Idx const idx = find_component(component, false); if (idx == invalid_index) { @@ -510,10 +513,52 @@ template class Dataset { return result; } + // get slice dataset from batch + Dataset get_slice_scenario(Idx begin, Idx end) const + requires(!is_indptr_mutable_v) + { + assert(begin <= end); + assert(0 <= begin); + assert(end <= batch_size()); + assert(is_batch()); + assert(is_dense()); + + // empty slice + if (begin == end) { + Dataset result{true, 0, dataset_info_.dataset->name, *meta_data_}; + result.add_buffer("node", 0, 0, nullptr, nullptr); + return result; + } + + // start with begin + Dataset result = get_individual_scenario(begin); + Idx const batch_size = end - begin; + result.dataset_info_.is_batch = true; + result.dataset_info_.batch_size = batch_size; + for (auto& component_info : result.dataset_info_.component_info) { + Idx const size = component_info.elements_per_scenario * batch_size; + component_info.total_elements = size; + } + return result; + } + + void set_next_cartesian_product_dimension(Dataset const* next) { + Dataset const* current = next; + while (current != nullptr) { + if (this == current) { + throw 
DatasetError{"Cannot create cyclic cartesian product dimension linked list!\n"}; + } + current = current->get_next_cartesian_product_dimension(); + } + next_ = next; + } + Dataset const* get_next_cartesian_product_dimension() const { return next_; } + private: MetaData const* meta_data_; DatasetInfo dataset_info_; std::vector buffers_; + Dataset const* next_{}; std::span get_indptr_span(Indptr* indptr) const { return std::span{indptr, static_cast(batch_size() + 1)}; diff --git a/power_grid_model_c/power_grid_model_c/include/power_grid_model_c/dataset.h b/power_grid_model_c/power_grid_model_c/include/power_grid_model_c/dataset.h index 6edd6dd0d0..f65b260b01 100644 --- a/power_grid_model_c/power_grid_model_c/include/power_grid_model_c/dataset.h +++ b/power_grid_model_c/power_grid_model_c/include/power_grid_model_c/dataset.h @@ -212,6 +212,22 @@ PGM_API void PGM_dataset_const_add_attribute_buffer(PGM_Handle* handle, PGM_Cons */ PGM_API PGM_DatasetInfo const* PGM_dataset_const_get_info(PGM_Handle* handle, PGM_ConstDataset const* dataset); +/** + * @brief Set the next const dataset as cartesian product dimension. + * + * This function allows users to run a batch calculation with multiple dimensions of scenarios. + * The way users can achieve this is to combine multiple batch datasets + * to create a multi-dimension batch calculation using a linked list pattern. The calculation core will + * interpret the combined dataset as a cartesian product on a linked list of all the scenarios. + * Each batch dataset in the linked list represents one dimension of the cartesian product. + * + * @param handle + * @param dataset + * @param next_dataset The next dataset in the linked list. + */ +PGM_API void PGM_dataset_const_set_next_cartesian_product_dimension(PGM_Handle* handle, PGM_ConstDataset* dataset, + PGM_ConstDataset const* next_dataset); + /** + * @brief Get the dataset info of the instance PGM_WritableDataset. 
* @param handle diff --git a/power_grid_model_c/power_grid_model_c/include/power_grid_model_c/model.h b/power_grid_model_c/power_grid_model_c/include/power_grid_model_c/model.h index 3827aefb6a..5c5ad3ab22 100644 --- a/power_grid_model_c/power_grid_model_c/include/power_grid_model_c/model.h +++ b/power_grid_model_c/power_grid_model_c/include/power_grid_model_c/model.h @@ -91,6 +91,11 @@ PGM_API void PGM_get_indexer(PGM_Handle* handle, PGM_PowerGridModel const* model * If batch_dataset == NULL, it is a one-time calculation. * If batch_dataset != NULL, it is a batch calculation with batch update in the batch_dataset. * + * The user can use the function set_next_cartesian_product_dimension() to combine multiple batch datasets + * to create a multi-dimension batch calculation using a linked list pattern. The calculation core will + * interpret the combined dataset as a cartesian product on a linked list of all the scenarios. + * Each batch dataset in the linked list represents one dimension of the cartesian product. + * * You need to pre-allocate all output buffer. * * Use PGM_error_code() and PGM_error_message() to check the error. 
diff --git a/power_grid_model_c/power_grid_model_c/src/dataset.cpp b/power_grid_model_c/power_grid_model_c/src/dataset.cpp index 0b9e3bee6e..13c24c5635 100644 --- a/power_grid_model_c/power_grid_model_c/src/dataset.cpp +++ b/power_grid_model_c/power_grid_model_c/src/dataset.cpp @@ -101,6 +101,13 @@ void PGM_dataset_const_add_attribute_buffer(PGM_Handle* handle, PGM_ConstDataset PGM_regular_error); } +void PGM_dataset_const_set_next_cartesian_product_dimension(PGM_Handle* handle, PGM_ConstDataset* dataset, + PGM_ConstDataset const* next_dataset) { + call_with_catch( + handle, [dataset, next_dataset]() { dataset->set_next_cartesian_product_dimension(next_dataset); }, + PGM_regular_error); +} + PGM_DatasetInfo const* PGM_dataset_const_get_info(PGM_Handle* /*unused*/, PGM_ConstDataset const* dataset) { return &dataset->get_description(); } diff --git a/power_grid_model_c/power_grid_model_c/src/model.cpp b/power_grid_model_c/power_grid_model_c/src/model.cpp index 8310c62eae..4579648f86 100644 --- a/power_grid_model_c/power_grid_model_c/src/model.cpp +++ b/power_grid_model_c/power_grid_model_c/src/model.cpp @@ -15,6 +15,9 @@ #include #include +#include +#include + namespace { using namespace power_grid_model; } // namespace @@ -55,6 +58,7 @@ void PGM_get_indexer(PGM_Handle* handle, PGM_PowerGridModel const* model, char c PGM_regular_error); } +// helper functions namespace { void check_no_experimental_features_used(MainModel const& model, MainModel::Options const& opt) { // optionally add experimental feature checks here @@ -142,9 +146,11 @@ constexpr auto extract_calculation_options(PGM_Options const& opt) { } } // namespace -// run calculation -void PGM_calculate(PGM_Handle* handle, PGM_PowerGridModel* model, PGM_Options const* opt, - PGM_MutableDataset const* output_dataset, PGM_ConstDataset const* batch_dataset) { +// calculation implementation +namespace { + +void calculate_impl(PGM_Handle* handle, PGM_PowerGridModel* model, PGM_Options const* opt, + 
PGM_MutableDataset const* output_dataset, PGM_ConstDataset const* batch_dataset) { PGM_clear_error(handle); // check dataset integrity if ((batch_dataset != nullptr) && (!batch_dataset->is_batch() || !output_dataset->is_batch())) { @@ -180,5 +186,88 @@ void PGM_calculate(PGM_Handle* handle, PGM_PowerGridModel* model, PGM_Options co } } +void merge_batch_error_msgs(PGM_Handle* handle, PGM_Handle const& local_handle, Idx scenario_offset, Idx stride_size) { + if (local_handle.err_code == PGM_no_error) { + return; + } + handle->err_code = PGM_batch_error; + if (local_handle.err_code == PGM_batch_error) { + for (auto&& [idx, err_msg] : std::views::zip(local_handle.failed_scenarios, local_handle.batch_errs)) { + handle->failed_scenarios.push_back(idx + scenario_offset); + handle->batch_errs.push_back(err_msg); + } + } else { + for (Idx i = 0; i < stride_size; ++i) { + handle->failed_scenarios.push_back(scenario_offset + i); + handle->batch_errs.push_back(local_handle.err_msg); + } + } +} + +Idx get_batch_dimension(PGM_ConstDataset const* batch_dataset) { + Idx dimension = 0; + while (batch_dataset != nullptr) { + ++dimension; + batch_dataset = batch_dataset->get_next_cartesian_product_dimension(); + } + return dimension; +} + +Idx get_stride_size(PGM_ConstDataset const* batch_dataset) { + Idx size = 1; + PGM_ConstDataset const* current = batch_dataset->get_next_cartesian_product_dimension(); + while (current != nullptr) { + size *= current->batch_size(); + current = current->get_next_cartesian_product_dimension(); + } + return size; +} + +} // namespace + +// run calculation +void PGM_calculate(PGM_Handle* handle, PGM_PowerGridModel* model, PGM_Options const* opt, + PGM_MutableDataset const* output_dataset, PGM_ConstDataset const* batch_dataset) { + // for dimension < 2 (one-time or 1D batch), call implementation directly + if (auto const batch_dimension = get_batch_dimension(batch_dataset); batch_dimension < 2) { + calculate_impl(handle, model, opt, output_dataset, 
batch_dataset); + return; + } + + // get stride size of the rest of dimensions + Idx const first_batch_size = batch_dataset->batch_size(); + Idx const stride_size = get_stride_size(batch_dataset); + + // loop over the first dimension batch + for (Idx i = 0; i < first_batch_size; ++i) { + // a new handle + PGM_Handle local_handle{}; + // create sliced datasets for the rest of dimensions + PGM_ConstDataset const single_update_dataset = batch_dataset->get_individual_scenario(i); + PGM_MutableDataset const sliced_output_dataset = + output_dataset->get_slice_scenario(i * stride_size, (i + 1) * stride_size); + + // create a model copy + std::unique_ptr const local_model{PGM_copy_model(&local_handle, model)}; + if (local_handle.err_code != PGM_no_error) { + merge_batch_error_msgs(handle, local_handle, i * stride_size, stride_size); + continue; + } + + // apply the update + PGM_update_model(&local_handle, local_model.get(), &single_update_dataset); + if (local_handle.err_code != PGM_no_error) { + merge_batch_error_msgs(handle, local_handle, i * stride_size, stride_size); + continue; + } + + // recursive call + PGM_calculate(&local_handle, local_model.get(), opt, &sliced_output_dataset, + batch_dataset->get_next_cartesian_product_dimension()); + // merge errors + merge_batch_error_msgs(handle, local_handle, i * stride_size, stride_size); + } +} + // destroy model void PGM_destroy_model(PGM_PowerGridModel* model) { delete model; } diff --git a/power_grid_model_c/power_grid_model_cpp/include/power_grid_model_cpp/dataset.hpp b/power_grid_model_c/power_grid_model_cpp/include/power_grid_model_cpp/dataset.hpp index 3adb5cf44b..47e48460ae 100644 --- a/power_grid_model_c/power_grid_model_cpp/include/power_grid_model_cpp/dataset.hpp +++ b/power_grid_model_c/power_grid_model_cpp/include/power_grid_model_cpp/dataset.hpp @@ -198,6 +198,10 @@ class DatasetConst { data.get()); } + void set_next_cartesian_product_dimension(DatasetConst const& next_dataset) { + 
handle_.call_with(PGM_dataset_const_set_next_cartesian_product_dimension, get(), next_dataset.get()); + } + DatasetInfo const& get_info() const { return info_; } private: diff --git a/src/power_grid_model/_core/power_grid_core.py b/src/power_grid_model/_core/power_grid_core.py index c276303273..31bd224bd2 100644 --- a/src/power_grid_model/_core/power_grid_core.py +++ b/src/power_grid_model/_core/power_grid_core.py @@ -481,6 +481,12 @@ def destroy_dataset_const(self, dataset: ConstDatasetPtr) -> None: # type: igno def dataset_const_get_info(self, dataset: ConstDatasetPtr) -> DatasetInfoPtr: # type: ignore[empty-body] pass # pragma: no cover + @make_c_binding + def dataset_const_set_next_cartesian_product_dimension( + self, dataset: ConstDatasetPtr, next_dataset: ConstDatasetPtr + ) -> None: # type: ignore[empty-body] + pass # pragma: no cover + @make_c_binding def dataset_writable_get_info(self, dataset: WritableDatasetPtr) -> DatasetInfoPtr: # type: ignore[empty-body] pass # pragma: no cover diff --git a/src/power_grid_model/_core/power_grid_dataset.py b/src/power_grid_model/_core/power_grid_dataset.py index f6013f16f0..578e1e68fa 100644 --- a/src/power_grid_model/_core/power_grid_dataset.py +++ b/src/power_grid_model/_core/power_grid_dataset.py @@ -350,6 +350,16 @@ def get_info(self) -> CDatasetInfo: """ return CDatasetInfo(pgc.dataset_const_get_info(self._const_dataset)) + def set_next_cartesian_product_dimension(self, next_dataset: "CConstDataset") -> None: + """ + Set the next dataset in the linked list. + + Args: + next_dataset: The next dataset to set. 
+ """ + pgc.dataset_const_set_next_cartesian_product_dimension(self._const_dataset, next_dataset._const_dataset) + assert_no_error() + def __del__(self): pgc.destroy_dataset_const(self._const_dataset) diff --git a/src/power_grid_model/_core/power_grid_model.py b/src/power_grid_model/_core/power_grid_model.py index 49fbf36bff..fce2cdcb99 100644 --- a/src/power_grid_model/_core/power_grid_model.py +++ b/src/power_grid_model/_core/power_grid_model.py @@ -6,7 +6,9 @@ Main power grid model class """ +import itertools from enum import IntEnum +from math import prod from typing import Any, overload import numpy as np @@ -281,7 +283,7 @@ def _calculate_impl( # noqa: PLR0913 self, calculation_type: CalculationType, symmetric: bool, - update_data: Dataset | None, + update_data: Dataset | list[Dataset] | None, output_component_types: ComponentAttributeMapping, options: Options, continue_on_batch_error: bool, @@ -303,15 +305,19 @@ def _calculate_impl( # noqa: PLR0913 Returns: """ self._batch_error = None - is_batch = update_data is not None - - if update_data is not None: - prepared_update = prepare_update_view(update_data) - update_ptr = prepared_update.get_dataset_ptr() - batch_size = prepared_update.get_info().batch_size() + if update_data is None: + is_batch = False + update_data = [] else: - update_ptr = ConstDatasetPtr() - batch_size = 1 + is_batch = True + if not isinstance(update_data, list): + update_data = [update_data] + update_data = [_map_to_component_types(x) for x in update_data] + prepared_update = [prepare_update_view(x) for x in update_data] + for this_dataset, next_dataset in itertools.pairwise(prepared_update): + this_dataset.set_next_cartesian_product_dimension(next_dataset) + update_ptr: ConstDatasetPtr = prepared_update[0].get_dataset_ptr() if prepared_update else ConstDatasetPtr() + batch_size = prod(x.get_info().batch_size() for x in prepared_update) output_data = self._construct_output( output_component_types=output_component_types, @@ -349,7 +355,7 
@@ def _calculate_power_flow( # noqa: PLR0913 error_tolerance: float = 1e-8, max_iterations: int = 20, calculation_method: CalculationMethod | str = CalculationMethod.newton_raphson, - update_data: Dataset | None = None, + update_data: Dataset | list[Dataset] | None = None, threading: int = -1, output_component_types: ComponentAttributeMapping = None, continue_on_batch_error: bool = False, @@ -386,7 +392,7 @@ def _calculate_state_estimation( # noqa: PLR0913 error_tolerance: float = 1e-8, max_iterations: int = 20, calculation_method: CalculationMethod | str = CalculationMethod.iterative_linear, - update_data: Dataset | None = None, + update_data: Dataset | list[Dataset] | None = None, threading: int = -1, output_component_types: ComponentAttributeMapping = None, continue_on_batch_error: bool = False, @@ -418,7 +424,7 @@ def _calculate_short_circuit( # noqa: PLR0913 self, *, calculation_method: CalculationMethod | str = CalculationMethod.iec60909, - update_data: Dataset | None = None, + update_data: Dataset | list[Dataset] | None = None, threading: int = -1, output_component_types: ComponentAttributeMapping = None, continue_on_batch_error: bool = False, @@ -515,7 +521,7 @@ def calculate_power_flow( error_tolerance: float = ..., max_iterations: int = ..., calculation_method: CalculationMethod | str = ..., - update_data: BatchDataset = ..., + update_data: BatchDataset | list[BatchDataset] = ..., threading: int = ..., output_component_types: None | set[ComponentTypeVar] | list[ComponentTypeVar] = ..., continue_on_batch_error: bool = ..., @@ -530,7 +536,7 @@ def calculate_power_flow( error_tolerance: float = ..., max_iterations: int = ..., calculation_method: CalculationMethod | str = ..., - update_data: BatchDataset = ..., + update_data: BatchDataset | list[BatchDataset] = ..., threading: int = ..., output_component_types: ComponentAttributeFilterOptions = ..., continue_on_batch_error: bool = ..., @@ -545,7 +551,7 @@ def calculate_power_flow( error_tolerance: float = 
..., max_iterations: int = ..., calculation_method: CalculationMethod | str = ..., - update_data: BatchDataset = ..., + update_data: BatchDataset | list[BatchDataset] = ..., threading: int = ..., output_component_types: ComponentAttributeMappingDict = ..., continue_on_batch_error: bool = ..., @@ -559,7 +565,7 @@ def calculate_power_flow( # noqa: PLR0913 error_tolerance: float = 1e-8, max_iterations: int = 20, calculation_method: CalculationMethod | str = CalculationMethod.newton_raphson, - update_data: BatchDataset | None = None, + update_data: BatchDataset | list[BatchDataset] | None = None, threading: int = -1, output_component_types: ComponentAttributeMapping = None, continue_on_batch_error: bool = False, @@ -583,8 +589,9 @@ def calculate_power_flow( # noqa: PLR0913 - newton_raphson: Use Newton-Raphson iterative method (default). - linear: Use linear method. - update_data (dict, optional): + update_data (dict, list of dict, optional): None: Calculate power flow once with the current model attributes. + Or a dictionary for batch calculation with batch update. - key: Component type name to be updated in batch. @@ -601,6 +608,13 @@ def calculate_power_flow( # noqa: PLR0913 compressed sparse structure. https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html - data: 1D numpy structured array in flat. + Or a list of such dictionaries (batch datasets) to represent multiple dimensions of cartesian product. + The calculation core will interpret these datasets as a cartesian product of all the scenarios. + Each batch dataset in the list represents one dimension of the cartesian product. + The output will then have row size equal to the product of the batch sizes of all these datasets, + in 1D flat structure. + E.g., if you have three batch datasets with batch sizes 2, 3, and 4 respectively, + and the number of nodes is 5, the final output for nodes will have shape (2*3*4, 5). threading (int, optional): Applicable only for batch calculation. 
- < 0: Sequential @@ -643,7 +657,7 @@ def calculate_power_flow( # noqa: PLR0913 error_tolerance=error_tolerance, max_iterations=max_iterations, calculation_method=calculation_method, - update_data=(_map_to_component_types(update_data) if update_data is not None else None), + update_data=update_data, threading=threading, output_component_types=output_component_types, continue_on_batch_error=continue_on_batch_error, @@ -701,7 +715,7 @@ def calculate_state_estimation( error_tolerance: float = ..., max_iterations: int = ..., calculation_method: CalculationMethod | str = ..., - update_data: BatchDataset = ..., + update_data: BatchDataset | list[BatchDataset] = ..., threading: int = ..., output_component_types: None | set[ComponentTypeVar] | list[ComponentTypeVar] = ..., continue_on_batch_error: bool = ..., @@ -715,7 +729,7 @@ def calculate_state_estimation( error_tolerance: float = ..., max_iterations: int = ..., calculation_method: CalculationMethod | str = ..., - update_data: BatchDataset = ..., + update_data: BatchDataset | list[BatchDataset] = ..., threading: int = ..., output_component_types: ComponentAttributeFilterOptions = ..., continue_on_batch_error: bool = ..., @@ -729,7 +743,7 @@ def calculate_state_estimation( error_tolerance: float = ..., max_iterations: int = ..., calculation_method: CalculationMethod | str = ..., - update_data: BatchDataset = ..., + update_data: BatchDataset | list[BatchDataset] = ..., threading: int = ..., output_component_types: ComponentAttributeMappingDict = ..., continue_on_batch_error: bool = ..., @@ -742,7 +756,7 @@ def calculate_state_estimation( # noqa: PLR0913 error_tolerance: float = 1e-8, max_iterations: int = 20, calculation_method: CalculationMethod | str = CalculationMethod.iterative_linear, - update_data: BatchDataset | None = None, + update_data: BatchDataset | list[BatchDataset] | None = None, threading: int = -1, output_component_types: ComponentAttributeMapping = None, continue_on_batch_error: bool = False, @@ -764,6 
+778,7 @@ def calculate_state_estimation( # noqa: PLR0913 calculation_method (an enumeration): Use iterative linear method. update_data (dict, optional): None: Calculate state estimation once with the current model attributes. + Or a dictionary for batch calculation with batch update. - key: Component type name to be updated in batch. @@ -780,6 +795,13 @@ def calculate_state_estimation( # noqa: PLR0913 compressed sparse structure. https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html - data: 1D numpy structured array in flat. + Or a list of such dictionaries (batch datasets) to represent multiple dimensions of cartesian product. + The calculation core will interpret these datasets as a cartesian product of all the scenarios. + Each batch dataset in the list represents one dimension of the cartesian product. + The output will then have row size equal to the product of the batch sizes of all these datasets, + in 1D flat structure. + E.g., if you have three batch datasets with batch sizes 2, 3, and 4 respectively, + and the number of nodes is 5, the final output for nodes will have shape (2*3*4, 5). threading (int, optional): Applicable only for batch calculation. 
- < 0: Sequential @@ -822,7 +844,7 @@ def calculate_state_estimation( # noqa: PLR0913 error_tolerance=error_tolerance, max_iterations=max_iterations, calculation_method=calculation_method, - update_data=(_map_to_component_types(update_data) if update_data is not None else None), + update_data=update_data, threading=threading, output_component_types=output_component_types, continue_on_batch_error=continue_on_batch_error, @@ -870,7 +892,7 @@ def calculate_short_circuit( self, *, calculation_method: CalculationMethod | str = ..., - update_data: BatchDataset = ..., + update_data: BatchDataset | list[BatchDataset] = ..., threading: int = ..., output_component_types: None | set[ComponentTypeVar] | list[ComponentTypeVar] = ..., continue_on_batch_error: bool = ..., @@ -882,7 +904,7 @@ def calculate_short_circuit( self, *, calculation_method: CalculationMethod | str = ..., - update_data: BatchDataset = ..., + update_data: BatchDataset | list[BatchDataset] = ..., threading: int = ..., output_component_types: ComponentAttributeFilterOptions = ..., continue_on_batch_error: bool = ..., @@ -894,7 +916,7 @@ def calculate_short_circuit( self, *, calculation_method: CalculationMethod | str = ..., - update_data: BatchDataset = ..., + update_data: BatchDataset | list[BatchDataset] = ..., threading: int = ..., output_component_types: ComponentAttributeMappingDict = ..., continue_on_batch_error: bool = ..., @@ -905,7 +927,7 @@ def calculate_short_circuit( # noqa: PLR0913 self, *, calculation_method: CalculationMethod | str = CalculationMethod.iec60909, - update_data: BatchDataset | None = None, + update_data: BatchDataset | list[BatchDataset] | None = None, threading: int = -1, output_component_types: ComponentAttributeMapping = None, continue_on_batch_error: bool = False, @@ -920,6 +942,7 @@ def calculate_short_circuit( # noqa: PLR0913 calculation_method (an enumeration): Use the iec60909 standard. update_data: None: calculate a short circuit once with the current model attributes. 
+ Or a dictionary for batch calculation with batch update - key: Component type name to be updated in batch @@ -936,6 +959,13 @@ def calculate_short_circuit( # noqa: PLR0913 compressed sparse structure. https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html - data: 1D numpy structured array in flat. + Or a list of such dictionaries (batch datasets) to represent multiple dimensions of cartesian product. + The calculation core will interpret these datasets as a cartesian product of all the scenarios. + Each batch dataset in the list represents one dimension of the cartesian product. + The output will then have row size equal to the product of the batch sizes of all these datasets, + in 1D flat structure. + E.g., if you have three batch datasets with batch sizes 2, 3, and 4 respectively, + and the number of nodes is 5, the final output for nodes will have shape (2*3*4, 5). threading (int, optional): Applicable only for batch calculation. - < 0: Sequential @@ -977,7 +1007,7 @@ def calculate_short_circuit( # noqa: PLR0913 """ return self._calculate_short_circuit( calculation_method=calculation_method, - update_data=(_map_to_component_types(update_data) if update_data is not None else None), + update_data=update_data, threading=threading, output_component_types=output_component_types, continue_on_batch_error=continue_on_batch_error, diff --git a/tests/native_api_tests/CMakeLists.txt b/tests/native_api_tests/CMakeLists.txt index e58c80b726..739b8be3cc 100644 --- a/tests/native_api_tests/CMakeLists.txt +++ b/tests/native_api_tests/CMakeLists.txt @@ -10,6 +10,7 @@ set(PROJECT_SOURCES "test_api_model_update.cpp" "test_api_serialization.cpp" "test_api_utils.cpp" + "test_api_model_multi_dimension.cpp" ) add_executable(power_grid_model_api_tests ${PROJECT_SOURCES}) diff --git a/tests/native_api_tests/test_api_model_multi_dimension.cpp b/tests/native_api_tests/test_api_model_multi_dimension.cpp new file mode 100644 index 0000000000..fd8dfc5327 --- 
/dev/null +++ b/tests/native_api_tests/test_api_model_multi_dimension.cpp @@ -0,0 +1,113 @@ +// SPDX-FileCopyrightText: Contributors to the Power Grid Model project +// +// SPDX-License-Identifier: MPL-2.0 + +#include "load_dataset.hpp" + +#include "power_grid_model_cpp.hpp" + +#include + +#include + +#include +#include +#include +#include +#include + +namespace power_grid_model_cpp { +namespace { +using namespace std::string_literals; +using power_grid_model_cpp_test::load_dataset; +using std::numbers::sqrt3; + +// input +auto const complete_state_json = R"json({ + "version": "1.0", + "type": "input", + "is_batch": false, + "attributes": {}, + "data": { + "sym_load": [ + {"id": 2, "node": 0, "status": 1, "type": 0, "p_specified": 0, "q_specified": 0} + ], + "source": [ + {"id": 1, "node": 0, "status": 1, "u_ref": 1, "sk": 1e20} + ], + "node": [ + {"id": 0, "u_rated": 10e3} + ] + } +})json"s; + +} // namespace + +TEST_CASE("API Model Multi-Dimension") { + // model + auto const owning_input_dataset = load_dataset(complete_state_json); + auto const& input_dataset = owning_input_dataset.dataset; + Model model{50.0, input_dataset}; + + // 3-D batch update + double const u_rated = 10e3; + std::vector const u_ref{0.9, 1.0, 1.1}; + std::vector const p_specified{1e6, 2e6, 3e6, 4e6}; + std::vector const q_specified{0.1e6, 0.2e6, 0.3e6, 0.4e6, 0.5e6}; + Idx const size_u_ref = std::ssize(u_ref); + Idx const size_p_specified = std::ssize(p_specified); + Idx const size_q_specified = std::ssize(q_specified); + Idx const total_batch_size = size_u_ref * size_p_specified * size_q_specified; + + // calculate source current manually + std::vector i_source_ref(total_batch_size); + for (Idx i = 0; i < size_u_ref; ++i) { + for (Idx j = 0; j < size_p_specified; ++j) { + for (Idx k = 0; k < size_q_specified; ++k) { + Idx const index = i * size_p_specified * size_q_specified + j * size_q_specified + k; + double const s = std::abs(std::complex{p_specified[j], q_specified[k]}); + 
i_source_ref[index] = s / (sqrt3 * u_rated * u_ref[i]); + } + } + } + + // construct batch update dataset + DatasetConst batch_u_ref{"update", true, size_u_ref}; + batch_u_ref.add_buffer("source", 1, size_u_ref, nullptr, nullptr); + batch_u_ref.add_attribute_buffer("source", "u_ref", u_ref.data()); + DatasetConst batch_p_specified{"update", true, size_p_specified}; + batch_p_specified.add_buffer("sym_load", 1, size_p_specified, nullptr, nullptr); + batch_p_specified.add_attribute_buffer("sym_load", "p_specified", p_specified.data()); + DatasetConst batch_q_specified{"update", true, size_q_specified}; + batch_q_specified.add_buffer("sym_load", 1, size_q_specified, nullptr, nullptr); + batch_q_specified.add_attribute_buffer("sym_load", "q_specified", q_specified.data()); + batch_u_ref.set_next_cartesian_product_dimension(batch_p_specified); + batch_p_specified.set_next_cartesian_product_dimension(batch_q_specified); + + SUBCASE("Correct cartesian product usage") { + // output dataset + std::vector i_source_result(total_batch_size); + DatasetMutable batch_output_dataset{"sym_output", true, total_batch_size}; + batch_output_dataset.add_buffer("source", 1, total_batch_size, nullptr, nullptr); + batch_output_dataset.add_attribute_buffer("source", "i", i_source_result.data()); + + // options + Options const options{}; + + // calculate + model.calculate(options, batch_output_dataset, batch_u_ref); + + // check results + for (Idx idx = 0; idx < total_batch_size; ++idx) { + CHECK(i_source_result[idx] == doctest::Approx(i_source_ref[idx])); + } + } + SUBCASE("Linked list item referring to itself is not allowed") { + CHECK_THROWS_AS(batch_u_ref.set_next_cartesian_product_dimension(batch_u_ref), PowerGridRegularError); + } + SUBCASE("Cyclic linked list is not allowed") { + CHECK_THROWS_AS(batch_q_specified.set_next_cartesian_product_dimension(batch_u_ref), PowerGridRegularError); + } +} + +} // namespace power_grid_model_cpp diff --git 
a/tests/unit/test_multi_dimensional_batch.py b/tests/unit/test_multi_dimensional_batch.py new file mode 100644 index 0000000000..2ca9e13faa --- /dev/null +++ b/tests/unit/test_multi_dimensional_batch.py @@ -0,0 +1,48 @@ +# SPDX-FileCopyrightText: Contributors to the Power Grid Model project +# +# SPDX-License-Identifier: MPL-2.0 + +import json + +import numpy as np + +from power_grid_model import PowerGridModel +from power_grid_model.utils import json_deserialize + +input_data = { + "version": "1.0", + "type": "input", + "is_batch": False, + "attributes": {}, + "data": { + "sym_load": [{"id": 2, "node": 0, "status": 1, "type": 0, "p_specified": 0, "q_specified": 0}], + "source": [{"id": 1, "node": 0, "status": 1, "u_ref": 1, "sk": 1e20}], + "node": [{"id": 0, "u_rated": 10e3}], + }, +} + +input_data_json = json.dumps(input_data) + + +def test_multi_dimensional_batch(): + input_dataset = json_deserialize(input_data_json) + pgm = PowerGridModel(input_dataset) + + u_rated = 10e3 + u_ref = np.array([0.9, 1.0, 1.1], dtype=np.float64).reshape(-1, 1) + p_specified = np.array([1e6, 2e6, 3e6, 4e6], dtype=np.float64).reshape(-1, 1) + q_specified = np.array([0.1e6, 0.2e6, 0.3e6, 0.4e6, 0.5e6], dtype=np.float64).reshape(-1, 1) + i_source_ref = np.abs(p_specified.reshape(1, -1, 1) + 1j * q_specified.reshape(1, 1, -1)) / ( + u_ref.reshape(-1, 1, 1) * u_rated * np.sqrt(3) + ) + i_source_ref = i_source_ref.ravel() + + u_ref_batch = {"source": {"u_ref": u_ref}} + p_specified_batch = {"sym_load": {"p_specified": p_specified}} + q_specified_batch = {"sym_load": {"q_specified": q_specified}} + + result = pgm.calculate_power_flow( + update_data=[u_ref_batch, p_specified_batch, q_specified_batch], output_component_types={"source": ["i"]} + ) + + assert np.allclose(result["source"]["i"].ravel(), i_source_ref)