-
Notifications
You must be signed in to change notification settings - Fork 47
Multi-dimensional batch calculation: list of update batch dataset as a cartesian product #1201
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
8085e5b
73d85c5
234f38d
be44d80
1fd7ed3
45f4d72
06cb330
a4fc01e
118655f
3b41f28
d88db97
1b1190d
21c31c5
6e8c081
0b989b4
14b4039
3338881
01c86a9
7786b0c
5a3f394
2650968
9aaa6bd
e4aa439
237681f
b602f2e
3e79237
8d8d80c
61cfd57
a1331ba
36707d9
9c27e6e
bce908e
7faa6c1
c4b854e
cb67ae3
a7352f2
b3c8901
fa6fdf9
65888cd
61e915f
fb22a70
79936a6
2e0ff73
fda2687
e106133
951fa68
04b1abd
4306491
6f1b5a1
b472331
3185c93
b18d06c
5967452
e203ff1
d222d3c
d1dc1cf
26f6ca4
f58e13f
30ec19c
54a8d01
7b106f2
a1c0710
76dec6e
f5e69df
7e2c865
c208af1
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -15,6 +15,9 @@ | |
| #include <power_grid_model/common/common.hpp> | ||
| #include <power_grid_model/main_model.hpp> | ||
|
|
||
| #include <memory> | ||
| #include <numeric> | ||
|
|
||
| namespace { | ||
| using namespace power_grid_model; | ||
| } // namespace | ||
|
|
@@ -55,6 +58,7 @@ void PGM_get_indexer(PGM_Handle* handle, PGM_PowerGridModel const* model, char c | |
| PGM_regular_error); | ||
| } | ||
|
|
||
| // helper functions | ||
| namespace { | ||
TonyXiang8787 marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| void check_no_experimental_features_used(MainModel const& model, MainModel::Options const& opt) { | ||
| // optionally add experimental feature checks here | ||
|
|
@@ -142,9 +146,11 @@ constexpr auto extract_calculation_options(PGM_Options const& opt) { | |
| } | ||
| } // namespace | ||
|
|
||
| // run calculation | ||
| void PGM_calculate(PGM_Handle* handle, PGM_PowerGridModel* model, PGM_Options const* opt, | ||
| PGM_MutableDataset const* output_dataset, PGM_ConstDataset const* batch_dataset) { | ||
| // calculation implementation | ||
| namespace { | ||
TonyXiang8787 marked this conversation as resolved.
Show resolved
Hide resolved
|
||
|
|
||
| void calculate_impl(PGM_Handle* handle, PGM_PowerGridModel* model, PGM_Options const* opt, | ||
| PGM_MutableDataset const* output_dataset, PGM_ConstDataset const* batch_dataset) { | ||
| PGM_clear_error(handle); | ||
| // check dataset integrity | ||
| if ((batch_dataset != nullptr) && (!batch_dataset->is_batch() || !output_dataset->is_batch())) { | ||
|
|
@@ -180,5 +186,88 @@ void PGM_calculate(PGM_Handle* handle, PGM_PowerGridModel* model, PGM_Options co | |
| } | ||
| } | ||
|
|
||
| void merge_batch_error_msgs(PGM_Handle* handle, PGM_Handle const& local_handle, Idx scenario_offset, Idx stride_size) { | ||
| if (local_handle.err_code == PGM_no_error) { | ||
| return; | ||
| } | ||
figueroa1395 marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| handle->err_code = PGM_batch_error; | ||
| if (local_handle.err_code == PGM_batch_error) { | ||
| for (auto&& [idx, err_msg] : std::views::zip(local_handle.failed_scenarios, local_handle.batch_errs)) { | ||
| handle->failed_scenarios.push_back(idx + scenario_offset); | ||
| handle->batch_errs.push_back(err_msg); | ||
| } | ||
| } else { | ||
| for (Idx i = 0; i < stride_size; ++i) { | ||
| handle->failed_scenarios.push_back(scenario_offset + i); | ||
| handle->batch_errs.push_back(local_handle.err_msg); | ||
| } | ||
TonyXiang8787 marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| } | ||
| } | ||
|
|
||
| Idx get_batch_dimension(PGM_ConstDataset const* batch_dataset) { | ||
TonyXiang8787 marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| Idx dimension = 0; | ||
| while (batch_dataset != nullptr) { | ||
| ++dimension; | ||
| batch_dataset = batch_dataset->get_next_cartesian_product_dimension(); | ||
| } | ||
| return dimension; | ||
| } | ||
|
|
||
| Idx get_stride_size(PGM_ConstDataset const* batch_dataset) { | ||
| Idx size = 1; | ||
| PGM_ConstDataset const* current = batch_dataset->get_next_cartesian_product_dimension(); | ||
| while (current != nullptr) { | ||
| size *= current->batch_size(); | ||
| current = current->get_next_cartesian_product_dimension(); | ||
| } | ||
| return size; | ||
| } | ||
|
|
||
| } // namespace | ||
|
|
||
| // run calculation | ||
| // For batch dimension >= 2 the cartesian product is evaluated recursively: | ||
| // slice off the first dimension, apply each of its scenarios to a model copy, | ||
| // and recurse on the remaining dimensions with the matching output slice. | ||
| // Errors of each slice are merged into the caller's handle per stride. | ||
| void PGM_calculate(PGM_Handle* handle, PGM_PowerGridModel* model, PGM_Options const* opt, | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. are we sure that we want to do this logic here and not in the C++ job dispatch where it's easier to test and also better multi-thread-able? E.g. we could only create separate threads for the outermost dimensions.
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. We can think about this kind of refactor/optimization in later stage. The C-API will not be affected.
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. however, this is one of those things in which it's hard to refactor at a later stage. i'd much rather refactor now to ensure that we design for change.
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I do not think it is difficult to refactor in later stage. The current implementation is pretty separated in the C-API files. If we decide to migrate this into the core, we can just reset this change to the old C-API file and implement the MD batch in the core.
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. then isn't it easier to do the migration now? we of course don't need to fully support the multi threading thing i thought of, but why not put it in the right location from the start?
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. We have a working implementation now and well-defined user-API. I think the priority should be releasing this fast to the user and hear user experience feedback, before we do another round of internal refactoring. At a later stage, if and how we are going to refactor this will depend on user business case. |
||
| PGM_MutableDataset const* output_dataset, PGM_ConstDataset const* batch_dataset) { | ||
TonyXiang8787 marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| // for dimension < 2 (one-time or 1D batch), call implementation directly | ||
| if (auto const batch_dimension = get_batch_dimension(batch_dataset); batch_dimension < 2) { | ||
| calculate_impl(handle, model, opt, output_dataset, batch_dataset); | ||
| return; | ||
| } | ||
|
|
||
| // get stride size of the rest of dimensions (scenarios per first-dimension step) | ||
| Idx const first_batch_size = batch_dataset->batch_size(); | ||
| Idx const stride_size = get_stride_size(batch_dataset); | ||
|
|
||
| // loop over the first dimension batch | ||
| for (Idx i = 0; i < first_batch_size; ++i) { | ||
| // a new handle, local to this slice, so its errors can be merged into the caller's handle | ||
| PGM_Handle local_handle{}; | ||
| // create sliced datasets for the rest of dimensions | ||
| PGM_ConstDataset const single_update_dataset = batch_dataset->get_individual_scenario(i); | ||
| PGM_MutableDataset const sliced_output_dataset = | ||
| output_dataset->get_slice_scenario(i * stride_size, (i + 1) * stride_size); | ||
|
|
||
| // create a model copy, so the caller's model stays unmodified across scenarios | ||
| std::unique_ptr<PGM_PowerGridModel> const local_model{PGM_copy_model(&local_handle, model)}; | ||
| if (local_handle.err_code != PGM_no_error) { | ||
| merge_batch_error_msgs(handle, local_handle, i * stride_size, stride_size); | ||
| continue; | ||
| } | ||
|
|
||
| // apply the update of scenario i of the first dimension to the model copy | ||
| PGM_update_model(&local_handle, local_model.get(), &single_update_dataset); | ||
| if (local_handle.err_code != PGM_no_error) { | ||
| merge_batch_error_msgs(handle, local_handle, i * stride_size, stride_size); | ||
| continue; | ||
| } | ||
|
|
||
| // recursive call: evaluate the remaining inner dimensions of the cartesian product | ||
| PGM_calculate(&local_handle, local_model.get(), opt, &sliced_output_dataset, | ||
| batch_dataset->get_next_cartesian_product_dimension()); | ||
| // merge errors of this slice into the aggregate handle | ||
| merge_batch_error_msgs(handle, local_handle, i * stride_size, stride_size); | ||
| } | ||
| } | ||
|
|
||
| // destroy model | ||
| void PGM_destroy_model(PGM_PowerGridModel* model) { delete model; } | ||
Uh oh!
There was an error while loading. Please reload this page.