Draft
Changes from all commits (66 commits)
8085e5b
start slice dataset
TonyXiang8787 Nov 10, 2025
73d85c5
add slice scenario
TonyXiang8787 Nov 10, 2025
234f38d
Merge branch 'main' into experimental/multi-dimension-batch
TonyXiang8787 Nov 10, 2025
be44d80
batch dimension
TonyXiang8787 Nov 10, 2025
1fd7ed3
calculation implementation
TonyXiang8787 Nov 10, 2025
45f4d72
error handling still needs to be done
TonyXiang8787 Nov 10, 2025
06cb330
error handling
TonyXiang8787 Nov 10, 2025
a4fc01e
add batch dimensions
TonyXiang8787 Nov 10, 2025
118655f
batch dimension
TonyXiang8787 Nov 10, 2025
3b41f28
start test
TonyXiang8787 Nov 10, 2025
d88db97
start test
TonyXiang8787 Nov 10, 2025
1b1190d
api will not work as intended
TonyXiang8787 Nov 10, 2025
21c31c5
api will not work as intended
TonyXiang8787 Nov 10, 2025
6e8c081
adjust md dataset
TonyXiang8787 Nov 11, 2025
0b989b4
add dataset
TonyXiang8787 Nov 11, 2025
14b4039
crash yet
TonyXiang8787 Nov 11, 2025
3338881
fix bounds checking
TonyXiang8787 Nov 11, 2025
01c86a9
remove span
TonyXiang8787 Nov 11, 2025
7786b0c
fix clang tidy
TonyXiang8787 Nov 11, 2025
5a3f394
format
TonyXiang8787 Nov 11, 2025
2650968
[skip ci] add cfunc in python
TonyXiang8787 Nov 11, 2025
9aaa6bd
force nullptr
TonyXiang8787 Nov 11, 2025
e4aa439
add options
TonyXiang8787 Nov 11, 2025
237681f
proxy for multidimensional in python
TonyXiang8787 Nov 11, 2025
b602f2e
modify main calculate input
TonyXiang8787 Nov 11, 2025
3e79237
type annotation
TonyXiang8787 Nov 11, 2025
8d8d80c
[skip ci] not working yet
TonyXiang8787 Nov 11, 2025
61cfd57
fix dimensions
TonyXiang8787 Nov 11, 2025
a1331ba
fix mypy
TonyXiang8787 Nov 12, 2025
36707d9
Merge branch 'main' into experimental/multi-dimension-batch
TonyXiang8787 Nov 23, 2025
9c27e6e
empty slice
TonyXiang8787 Nov 25, 2025
bce908e
Merge branch 'main' into experimental/multi-dimension-batch
TonyXiang8787 Nov 25, 2025
7faa6c1
begin next
TonyXiang8787 Nov 25, 2025
c4b854e
revert api change
TonyXiang8787 Nov 25, 2025
cb67ae3
model calculate for chaining
TonyXiang8787 Nov 25, 2025
a7352f2
add dataset chaining
TonyXiang8787 Nov 25, 2025
b3c8901
revert cpp interface
TonyXiang8787 Nov 25, 2025
fa6fdf9
c-api ready
TonyXiang8787 Nov 25, 2025
65888cd
revert python side
TonyXiang8787 Nov 25, 2025
61e915f
adjust python chaining
TonyXiang8787 Nov 25, 2025
fb22a70
python side ready
TonyXiang8787 Nov 25, 2025
79936a6
fix format
TonyXiang8787 Nov 26, 2025
2e0ff73
fix format
TonyXiang8787 Nov 26, 2025
fda2687
Merge branch 'main' into experimental/md-batch-chaining
TonyXiang8787 Dec 11, 2025
e106133
Update tests/native_api_tests/test_api_model_multi_dimension.cpp
TonyXiang8787 Dec 11, 2025
951fa68
fix comments
TonyXiang8787 Dec 11, 2025
04b1abd
Merge branch 'main' into experimental/md-batch-chaining
TonyXiang8787 Dec 12, 2025
4306491
refactor stride size
TonyXiang8787 Dec 12, 2025
6f1b5a1
change set/get next in cpp
TonyXiang8787 Dec 12, 2025
b472331
change get/set next in capi
TonyXiang8787 Dec 12, 2025
3185c93
modify python side set next
TonyXiang8787 Dec 12, 2025
b18d06c
add c-api docs
TonyXiang8787 Dec 12, 2025
5967452
try python docs
TonyXiang8787 Dec 12, 2025
e203ff1
try space
TonyXiang8787 Dec 12, 2025
d222d3c
adjust python docs
TonyXiang8787 Dec 12, 2025
d1dc1cf
avoid cyclic chain
TonyXiang8787 Dec 12, 2025
26f6ca4
format
TonyXiang8787 Dec 12, 2025
f58e13f
sonar
mgovers Dec 15, 2025
30ec19c
Update power_grid_model_c/power_grid_model_c/include/power_grid_model…
TonyXiang8787 Dec 15, 2025
54a8d01
do not use chain word
TonyXiang8787 Dec 15, 2025
7b106f2
Merge branch 'main' into experimental/md-batch-chaining
TonyXiang8787 Dec 15, 2025
a1c0710
Update power_grid_model_c/power_grid_model_c/src/model.cpp
TonyXiang8787 Dec 15, 2025
76dec6e
Update power_grid_model_c/power_grid_model_c/include/power_grid_model…
TonyXiang8787 Dec 15, 2025
f5e69df
Update src/power_grid_model/_core/power_grid_model.py
TonyXiang8787 Dec 15, 2025
7e2c865
modify test case
TonyXiang8787 Dec 15, 2025
c208af1
modify test case
TonyXiang8787 Dec 15, 2025
@@ -287,6 +287,9 @@ template <dataset_type_tag dataset_type_> class Dataset {
}
constexpr bool is_dense(Idx const i) const { return is_dense(buffers_[i]); }
constexpr bool is_dense(Buffer const& buffer) const { return buffer.indptr.empty(); }
constexpr bool is_dense() const {
return std::ranges::all_of(buffers_, [this](Buffer const& buffer) { return is_dense(buffer); });
}
constexpr bool is_sparse(std::string_view component, bool with_attribute_buffers = false) const {
Idx const idx = find_component(component, false);
if (idx == invalid_index) {
@@ -510,10 +513,52 @@ template <dataset_type_tag dataset_type_> class Dataset {
return result;
}

// get slice dataset from batch
Dataset get_slice_scenario(Idx begin, Idx end) const
requires(!is_indptr_mutable_v<dataset_type>)
{
assert(begin <= end);
assert(0 <= begin);
assert(end <= batch_size());
assert(is_batch());
assert(is_dense());

// empty slice
if (begin == end) {
Dataset result{true, 0, dataset_info_.dataset->name, *meta_data_};
result.add_buffer("node", 0, 0, nullptr, nullptr);
return result;
}

// start with begin
Dataset result = get_individual_scenario(begin);
Idx const batch_size = end - begin;
result.dataset_info_.is_batch = true;
result.dataset_info_.batch_size = batch_size;
for (auto& component_info : result.dataset_info_.component_info) {
Idx const size = component_info.elements_per_scenario * batch_size;
component_info.total_elements = size;
}
return result;
}
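    // Worked example of the slicing semantics above (an illustration, assuming a dense batch dataset
    // with batch_size() == 5 and a component whose elements_per_scenario == 3): get_slice_scenario(1, 4)
    // returns a batch dataset with batch_size 3, the same elements_per_scenario, and
    // total_elements == 3 * 3 == 9 for that component, reusing the buffers of the original dataset
    // starting at scenario 1 (via get_individual_scenario(1)).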

void set_next_cartesian_product_dimension(Dataset const* next) {
Dataset const* current = next;
while (current != nullptr) {
if (this == current) {
throw DatasetError{"Cannot create cyclic cartesian product dimension linked list!\n"};
}
current = current->get_next_cartesian_product_dimension();
}
next_ = next;
}
Dataset const* get_next_cartesian_product_dimension() const { return next_; }

private:
MetaData const* meta_data_;
DatasetInfo dataset_info_;
std::vector<Buffer> buffers_;
Dataset const* next_{};

std::span<Indptr> get_indptr_span(Indptr* indptr) const {
return std::span{indptr, static_cast<size_t>(batch_size() + 1)};
@@ -212,6 +212,22 @@ PGM_API void PGM_dataset_const_add_attribute_buffer(PGM_Handle* handle, PGM_Const
*/
PGM_API PGM_DatasetInfo const* PGM_dataset_const_get_info(PGM_Handle* handle, PGM_ConstDataset const* dataset);

/**
* @brief Set the next const dataset as a cartesian product dimension.
*
* This function allows users to run a batch calculation with multiple dimensions of scenarios.
* Users can achieve this by combining multiple batch datasets into a linked list,
* creating a multi-dimension batch calculation. The calculation core will
* interpret the combined datasets as a cartesian product of all the scenarios.
* Each batch dataset in the linked list represents one dimension of the cartesian product.
*
* @param handle
* @param dataset
* @param next_dataset The next dataset in the linked list.
*/
PGM_API void PGM_dataset_const_set_next_cartesian_product_dimension(PGM_Handle* handle, PGM_ConstDataset* dataset,
PGM_ConstDataset const* next_dataset);
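/*
 * A minimal usage sketch. The variable names below are assumptions, not part of this header:
 * `update_a` and `update_b` are two already-constructed batch update datasets, `output` is a
 * mutable output dataset pre-allocated for batch_size(update_a) * batch_size(update_b) scenarios,
 * and `opt` is a PGM_Options instance.
 *
 *   PGM_dataset_const_set_next_cartesian_product_dimension(handle, update_a, update_b);
 *   PGM_calculate(handle, model, opt, output, update_a);
 *   if (PGM_error_code(handle) != PGM_no_error) {
 *       puts(PGM_error_message(handle));
 *   }
 */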

/**
* @brief Get the dataset info of the instance PGM_WritableDataset.
* @param handle
@@ -91,6 +91,11 @@ PGM_API void PGM_get_indexer(PGM_Handle* handle, PGM_PowerGridModel const* model
* If batch_dataset == NULL, it is a one-time calculation.
* If batch_dataset != NULL, it is a batch calculation with batch update in the batch_dataset.
*
* The user can call PGM_dataset_const_set_next_cartesian_product_dimension() to combine multiple batch
* datasets into a linked list, creating a multi-dimension batch calculation. The calculation core will
* interpret the combined datasets as a cartesian product of all the scenarios.
* Each batch dataset in the linked list represents one dimension of the cartesian product.
*
* You need to pre-allocate all output buffer.
*
* Use PGM_error_code() and PGM_error_message() to check the error.
7 changes: 7 additions & 0 deletions power_grid_model_c/power_grid_model_c/src/dataset.cpp
@@ -101,6 +101,13 @@ void PGM_dataset_const_add_attribute_buffer(PGM_Handle* handle, PGM_ConstDataset
PGM_regular_error);
}

void PGM_dataset_const_set_next_cartesian_product_dimension(PGM_Handle* handle, PGM_ConstDataset* dataset,
PGM_ConstDataset const* next_dataset) {
call_with_catch(
handle, [dataset, next_dataset]() { dataset->set_next_cartesian_product_dimension(next_dataset); },
PGM_regular_error);
}

PGM_DatasetInfo const* PGM_dataset_const_get_info(PGM_Handle* /*unused*/, PGM_ConstDataset const* dataset) {
return &dataset->get_description();
}
95 changes: 92 additions & 3 deletions power_grid_model_c/power_grid_model_c/src/model.cpp
@@ -15,6 +15,9 @@
#include <power_grid_model/common/common.hpp>
#include <power_grid_model/main_model.hpp>

#include <memory>
#include <numeric>

namespace {
using namespace power_grid_model;
} // namespace
@@ -55,6 +58,7 @@ void PGM_get_indexer(PGM_Handle* handle, PGM_PowerGridModel const* model, char c
PGM_regular_error);
}

// helper functions
namespace {
void check_no_experimental_features_used(MainModel const& model, MainModel::Options const& opt) {
// optionally add experimental feature checks here
@@ -142,9 +146,11 @@ constexpr auto extract_calculation_options(PGM_Options const& opt) {
}
} // namespace

// run calculation
void PGM_calculate(PGM_Handle* handle, PGM_PowerGridModel* model, PGM_Options const* opt,
PGM_MutableDataset const* output_dataset, PGM_ConstDataset const* batch_dataset) {
// calculation implementation
namespace {

void calculate_impl(PGM_Handle* handle, PGM_PowerGridModel* model, PGM_Options const* opt,
PGM_MutableDataset const* output_dataset, PGM_ConstDataset const* batch_dataset) {
PGM_clear_error(handle);
// check dataset integrity
if ((batch_dataset != nullptr) && (!batch_dataset->is_batch() || !output_dataset->is_batch())) {
@@ -180,5 +186,88 @@ void PGM_calculate(PGM_Handle* handle, PGM_PowerGridModel* model, PGM_Options co
}
}

void merge_batch_error_msgs(PGM_Handle* handle, PGM_Handle const& local_handle, Idx scenario_offset, Idx stride_size) {
if (local_handle.err_code == PGM_no_error) {
return;
}
handle->err_code = PGM_batch_error;
if (local_handle.err_code == PGM_batch_error) {
for (auto&& [idx, err_msg] : std::views::zip(local_handle.failed_scenarios, local_handle.batch_errs)) {
handle->failed_scenarios.push_back(idx + scenario_offset);
handle->batch_errs.push_back(err_msg);
}
} else {
for (Idx i = 0; i < stride_size; ++i) {
handle->failed_scenarios.push_back(scenario_offset + i);
handle->batch_errs.push_back(local_handle.err_msg);
}
}
}

Idx get_batch_dimension(PGM_ConstDataset const* batch_dataset) {
Idx dimension = 0;
while (batch_dataset != nullptr) {
++dimension;
batch_dataset = batch_dataset->get_next_cartesian_product_dimension();
}
return dimension;
}

Idx get_stride_size(PGM_ConstDataset const* batch_dataset) {
Idx size = 1;
PGM_ConstDataset const* current = batch_dataset->get_next_cartesian_product_dimension();
while (current != nullptr) {
size *= current->batch_size();
current = current->get_next_cartesian_product_dimension();
}
return size;
}

} // namespace
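// Index bookkeeping for the multi-dimension loop below (illustration): for a linked list of batch
// datasets with batch sizes d0, d1, ..., dn, the flattened output holds d0 * d1 * ... * dn scenarios.
// get_stride_size() returns d1 * ... * dn, so outer scenario i of the first dimension owns the
// flattened range [i * stride, (i + 1) * stride). For example, with batch sizes {2, 3, 4} the stride
// is 12 and scenario (i0, i1, i2) maps to flattened index i0 * 12 + i1 * 4 + i2.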

Review thread on PGM_calculate:

Member: Are we sure that we want to do this logic here and not in the C++ job dispatch, where it's easier to test and also better multi-threadable? E.g. we could create separate threads only for the outermost dimensions.

Member Author: We can think about this kind of refactor/optimization at a later stage. The C-API will not be affected.

Member: However, this is one of those things that is hard to refactor at a later stage. I'd much rather refactor now to ensure that we design for change.

Member Author: I do not think it is difficult to refactor at a later stage. The current implementation is well separated in the C-API files. If we decide to migrate this into the core, we can simply revert this change to the old C-API file and implement the MD batch in the core.

Member: Then isn't it easier to do the migration now? We of course don't need to fully support the multi-threading idea, but why not put it in the right location from the start?

Member Author: We have a working implementation now and a well-defined user API. I think the priority should be to release this to users quickly and hear their feedback before we do another round of internal refactoring. At a later stage, whether and how we refactor this will depend on the user business case.

// run calculation
void PGM_calculate(PGM_Handle* handle, PGM_PowerGridModel* model, PGM_Options const* opt,
                   PGM_MutableDataset const* output_dataset, PGM_ConstDataset const* batch_dataset) {
// for dimension < 2 (one-time or 1D batch), call implementation directly
if (auto const batch_dimension = get_batch_dimension(batch_dataset); batch_dimension < 2) {
calculate_impl(handle, model, opt, output_dataset, batch_dataset);
return;
}

// get the stride size of the remaining dimensions
Idx const first_batch_size = batch_dataset->batch_size();
Idx const stride_size = get_stride_size(batch_dataset);

// loop over the first dimension batch
for (Idx i = 0; i < first_batch_size; ++i) {
// a new handle
PGM_Handle local_handle{};
// create sliced datasets for the remaining dimensions
PGM_ConstDataset const single_update_dataset = batch_dataset->get_individual_scenario(i);
PGM_MutableDataset const sliced_output_dataset =
output_dataset->get_slice_scenario(i * stride_size, (i + 1) * stride_size);

// create a model copy
std::unique_ptr<PGM_PowerGridModel> const local_model{PGM_copy_model(&local_handle, model)};
if (local_handle.err_code != PGM_no_error) {
merge_batch_error_msgs(handle, local_handle, i * stride_size, stride_size);
continue;
}

// apply the update
PGM_update_model(&local_handle, local_model.get(), &single_update_dataset);
if (local_handle.err_code != PGM_no_error) {
merge_batch_error_msgs(handle, local_handle, i * stride_size, stride_size);
continue;
}

// recursive call
PGM_calculate(&local_handle, local_model.get(), opt, &sliced_output_dataset,
batch_dataset->get_next_cartesian_product_dimension());
// merge errors
merge_batch_error_msgs(handle, local_handle, i * stride_size, stride_size);
}
}
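// Note on error reporting in the multi-dimension case: merge_batch_error_msgs records failed
// scenarios using their flattened indices (scenario_offset + local index), so a caller inspecting
// a PGM_batch_error result sees indices in the same cartesian product order as the output dataset.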

// destroy model
void PGM_destroy_model(PGM_PowerGridModel* model) { delete model; }
@@ -198,6 +198,10 @@ class DatasetConst {
data.get());
}

void set_next_cartesian_product_dimension(DatasetConst const& next_dataset) {
handle_.call_with(PGM_dataset_const_set_next_cartesian_product_dimension, get(), next_dataset.get());
}
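    // Usage sketch (a minimal illustration; everything except set_next_cartesian_product_dimension
    // is an assumption about the caller's code): given two batch DatasetConst instances update_a
    // and update_b,
    //
    //   update_a.set_next_cartesian_product_dimension(update_b);
    //
    // links update_b as the second cartesian product dimension. Linking a dataset back to itself,
    // directly or via the list, is rejected by the core with a cyclic-linked-list error.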

DatasetInfo const& get_info() const { return info_; }

private:
6 changes: 6 additions & 0 deletions src/power_grid_model/_core/power_grid_core.py
@@ -481,6 +481,12 @@ def destroy_dataset_const(self, dataset: ConstDatasetPtr) -> None: # type: igno
def dataset_const_get_info(self, dataset: ConstDatasetPtr) -> DatasetInfoPtr: # type: ignore[empty-body]
pass # pragma: no cover

@make_c_binding
def dataset_const_set_next_cartesian_product_dimension(
self, dataset: ConstDatasetPtr, next_dataset: ConstDatasetPtr
) -> None: # type: ignore[empty-body]
pass # pragma: no cover

@make_c_binding
def dataset_writable_get_info(self, dataset: WritableDatasetPtr) -> DatasetInfoPtr: # type: ignore[empty-body]
pass # pragma: no cover
10 changes: 10 additions & 0 deletions src/power_grid_model/_core/power_grid_dataset.py
@@ -350,6 +350,16 @@ def get_info(self) -> CDatasetInfo:
"""
return CDatasetInfo(pgc.dataset_const_get_info(self._const_dataset))

def set_next_cartesian_product_dimension(self, next_dataset: "CConstDataset") -> None:
"""
Set the next dataset as a cartesian product dimension in the linked list.

Args:
next_dataset: The next dataset to set.
"""
pgc.dataset_const_set_next_cartesian_product_dimension(self._const_dataset, next_dataset._const_dataset)
assert_no_error()

def __del__(self):
pgc.destroy_dataset_const(self._const_dataset)
