Skip to content

Commit

Permalink
[GPU] Fix dynamic loop shape-mismatch issue when multiple shapes are…
Browse files Browse the repository at this point in the history
… inferred

- Disable memory reuse for an updated shape when the primitive is a dependency of a loop
  • Loading branch information
kelvinchoi-intel committed Feb 16, 2024
1 parent 742c017 commit a65e9e4
Show file tree
Hide file tree
Showing 4 changed files with 199 additions and 2 deletions.
1 change: 1 addition & 0 deletions src/plugins/intel_gpu/src/graph/include/loop_inst.h
Original file line number Diff line number Diff line change
Expand Up @@ -298,6 +298,7 @@ class typed_primitive_inst<loop> : public typed_primitive_inst_base<loop> {
std::vector<backedge_memory_mapping> backedge_memory_mappings;
std::vector<concatenated_memory_mapping::ptr> concatenated_input_mem_mappings;
std::vector<concatenated_memory_mapping::ptr> concatenated_output_mem_mappings;
std::map<primitive_id, ov::PartialShape> dynamic_sliced_layout_mappings;

static std::string to_string(const loop_node& node);

Expand Down
19 changes: 18 additions & 1 deletion src/plugins/intel_gpu/src/graph/loop.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -375,12 +375,19 @@ loop_inst::concatenated_memory_mapping::ptr loop_inst::create_concat_memory_map(
if (extern_mem_ptr != nullptr) {
layout sliced_layout = intern_prim->get_output_layout(internal_id.idx);
auto inter_mem_ptr = intern_prim->output_memory_ptr(internal_id.idx);
if (inter_mem_ptr == nullptr) {
if (inter_mem_ptr == nullptr || shape_changed()) {
// if inner body intern_prim has no output memory because it has dynamic shape,
// calculate inner body intern_prim layout using concat_mem's layout.
auto updated_sliced_layout = sliced_layout.get_partial_shape();
OPENVINO_ASSERT(updated_sliced_layout[io_prim_map.axis].is_static() || num_iterations > 0,
"Not allowed dynamic dimension for axis when num_iteraiont is negative");

auto dyn_sliced_layout = dynamic_sliced_layout_mappings.find(internal_id.pid);
if (dyn_sliced_layout != dynamic_sliced_layout_mappings.end() &&
dyn_sliced_layout->second.is_dynamic()) {
updated_sliced_layout = dyn_sliced_layout->second;
}

auto concat_pshape = extern_prim->get_output_layout().get_partial_shape();
const auto shape_size = concat_pshape.size();
for (size_t i = 0; i < shape_size; i++) {
Expand Down Expand Up @@ -658,6 +665,16 @@ loop_inst::typed_primitive_inst(network & network, loop_node const & node)
_current_iteration_id = node.get_current_iteration_id();
_condition_id = node.get_execution_condition_id();
_num_iterations_id = node.get_num_iterations_id();

for (auto& input_map : _input_primitive_maps) {
const auto& internal_id = input_map.internal_id;
auto intern_prim = body_network->get_primitive(internal_id.pid);
layout sliced_layout = intern_prim->get_output_layout(internal_id.idx);
auto sliced_layout_ps = sliced_layout.get_partial_shape();
if (sliced_layout_ps.is_dynamic()) {
dynamic_sliced_layout_mappings.insert(std::pair<primitive_id, ov::PartialShape>(internal_id.pid, sliced_layout_ps));
}
}
}

void loop_inst::postprocess_output_memory(bool is_dynamic, int64_t current_iteration) {
Expand Down
14 changes: 13 additions & 1 deletion src/plugins/intel_gpu/src/graph/primitive_inst.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -536,8 +536,20 @@ event::ptr primitive_inst::realloc_if_needed() {
}
}

bool has_loop_user = false;
for (const auto& user : _node->get_users()) {
if (user->is_type<loop>()) {
has_loop_user = true;
}
}

// Clear out memory if if was previously reused, but now primitive can't be optimized
if (_node->is_type<gather>() || _node->is_type<permute>() || _node->is_type<reshape>() || _node->is_type<reorder>() || _node->is_type<strided_slice>()) {
if (!has_loop_user &&
(_node->is_type<gather>() ||
_node->is_type<permute>() ||
_node->is_type<reshape>() ||
_node->is_type<reorder>() ||
_node->is_type<strided_slice>())) {
if (can_be_optimized()) {
_max_output_layout_count = _deps[0].first->_max_output_layout_count;
return ev;
Expand Down
167 changes: 167 additions & 0 deletions src/plugins/intel_gpu/tests/unit/test_cases/loop_gpu_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -601,3 +601,170 @@ TEST(loop_gpu, support_dynamic_tensoriterator_outer_axis) {

test_loop_gpu_wo_trip_count({ 2, 1, 1, 2}, { 2, 5, 1, 2}, input_data_5_4, output_data_5_4, 1, 4);
}

// Builds a loop network without an explicit trip count (num_iterations = -1)
// and runs it several times with different concrete input shapes, verifying
// that shape changes between consecutive inferences are handled correctly.
// Per iteration the body computes b_add = slice + iter and b_mul = slice * iter
// (iter is the 1-based f32-cast loop counter); the outer eltwise sums the two
// loop outputs, so out = (x + iter) + (x * iter).
//
// body_input_layout    : (possibly dynamic) partial shape of the loop input
// whole_layouts        : concrete input shapes to run consecutively
// input_data_list      : input buffer per entry of whole_layouts
// expected_output_data : reference output; empty -> computed analytically below
// axis                 : axis the loop slices over
// exit_value           : loop body runs while counter < exit_value + 1, so the
//                        expected iteration count is exit_value + 1
// is_caching_test      : when true, the network is round-tripped through the
//                        model cache before execution
static void test_loop_gpu_wo_trip_count_w_multiple_shapes(ov::PartialShape body_input_layout,
                                        std::vector<ov::PartialShape> whole_layouts,
                                        std::vector<std::vector<float>> input_data_list,
                                        std::vector<float> expected_output_data,
                                        size_t axis,
                                        size_t exit_value,
                                        bool is_caching_test = false) {
    auto& engine = get_test_engine();

    auto b_input_layout = cldnn::layout{ body_input_layout, data_types::f32, format::bfyx };

    // The body consumes one slice along `axis` per iteration.
    ov::PartialShape sliced_input_shape = body_input_layout;
    sliced_input_shape[axis] = 1;
    auto sliced_input_layout = cldnn::layout{ sliced_input_shape, data_types::f32, format::bfyx };

    auto const_layout = cldnn::layout{ {}, data_types::i64, format::bfyx };

    auto e_initial_condition_mem = engine.allocate_memory(const_layout);
    auto e_num_iteration_mem = engine.allocate_memory(const_layout);
    auto b_exit_value_mem = engine.allocate_memory(const_layout);
    auto b_index_inc_mem = engine.allocate_memory(const_layout);

    // initialize input buffers
    set_values(e_initial_condition_mem, {1});
    set_values(b_exit_value_mem, {exit_value});
    set_values(b_index_inc_mem, {1});
    set_values(e_num_iteration_mem, {0});

    primitive_id body_current_iteration_id = "b_index";
    primitive_id body_execution_condition_id = "b_cond_exit_value";

    // Loop body: bump the counter, cast it to f32 (truncation-mode reorder),
    // and apply it elementwise to both sliced inputs. The execution condition
    // keeps iterating while b_index < b_exit_value.
    cldnn::topology body(
        input_layout(body_current_iteration_id, const_layout),
        input_layout("b_add_data", sliced_input_layout),
        input_layout("b_mul_data", sliced_input_layout),
        data("b_exit_value", b_exit_value_mem),
        data("b_index_inc", b_index_inc_mem),
        eltwise("b_index_update", input_info(body_current_iteration_id), input_info("b_index_inc"), eltwise_mode::sum),
        reorder("b_index_cast", input_info("b_index_update"),
                cldnn::format::any, data_types::f32, {}, cldnn::reorder_mean_mode::subtract, cldnn::padding(), true),
        eltwise(body_execution_condition_id, input_info("b_index"), input_info("b_exit_value"), eltwise_mode::lt),
        eltwise("b_add", input_info("b_add_data"), input_info("b_index_cast"), eltwise_mode::sum),
        eltwise("b_mul", input_info("b_mul_data"), input_info("b_index_cast"), eltwise_mode::prod));

    primitive_id trip_count_id = "";  // empty -> no explicit trip count primitive
    primitive_id actual_iteration_count_id = "actual_iteration_count";
    primitive_id initial_condition_id = "initial_condition";
    int64_t num_iterations = -1;      // negative -> iteration count decided at runtime

    std::vector<loop::io_primitive_map> input_primitive_maps {
        loop::io_primitive_map("input", "b_add_data", axis),
        loop::io_primitive_map("input", "b_mul_data", axis),
        loop::io_primitive_map(actual_iteration_count_id, body_current_iteration_id) };
    std::vector<loop::io_primitive_map> output_primitive_maps {
        loop::io_primitive_map(cldnn::input_info("loop", 0), cldnn::input_info("b_add", 0), axis),
        loop::io_primitive_map(cldnn::input_info("loop", 1), cldnn::input_info("b_mul", 0), axis) };
    std::vector<loop::backedge_mapping> back_edges {
        loop::backedge_mapping("b_index_update", body_current_iteration_id) };

    auto body_program = build_program(engine, body, body_execution_condition_id, output_primitive_maps, back_edges, true);

    cldnn::topology topology(
        input_layout("input", b_input_layout),
        input_layout(initial_condition_id, e_initial_condition_mem->get_layout()),
        mutable_data(actual_iteration_count_id, e_num_iteration_mem),
        loop("loop", { input_info(actual_iteration_count_id), input_info(initial_condition_id), input_info("input") }, body_program,
             trip_count_id, initial_condition_id, actual_iteration_count_id,
             input_primitive_maps, output_primitive_maps, back_edges,
             num_iterations, body_current_iteration_id, body_execution_condition_id, 2),
        eltwise("out_sum", input_info("loop", 0), input_info("loop", 1), eltwise_mode::sum));

    ExecutionConfig config = get_test_default_config(engine);
    config.set_property(ov::intel_gpu::allow_new_shape_infer(true));

    cldnn::network::ptr network = get_network(engine, topology, config, get_test_stream_ptr(), is_caching_test);

    // Run the SAME network once per requested shape: this is the point of the
    // test — consecutive inferences with different shapes on a dynamic loop.
    for (size_t i = 0; i < whole_layouts.size(); i++) {
        auto whole_layout = whole_layouts[i];
        auto input_data = input_data_list[i];

        // re-initialize the scalar buffers before every run
        set_values(e_initial_condition_mem, {1});
        set_values(b_exit_value_mem, {exit_value});
        set_values(b_index_inc_mem, {1});
        set_values(e_num_iteration_mem, {0});

        auto e_input_layout = cldnn::layout{ whole_layout, data_types::f32, format::bfyx };
        auto e_input_mem = engine.allocate_memory(e_input_layout); // b,f,x,y
        auto expected_output_layout = whole_layout;
        set_values(e_input_mem, input_data);
        network->set_input_data("input", e_input_mem);

        network->set_input_data(initial_condition_id, e_initial_condition_mem);

        auto outputs = network->execute();
        ASSERT_EQ(outputs.size(), 1);

        // Without a trip count the loop runs until the condition fails,
        // i.e. exactly exit_value + 1 iterations.
        auto expected_num_iterations = (exit_value + 1);
        expected_output_layout[axis] = expected_num_iterations;
        auto e_output_layout = cldnn::layout{ expected_output_layout, data_types::f32, format::bfyx };

        auto num_iter_mem = network->get_output_memory(actual_iteration_count_id);
        if (num_iter_mem != nullptr) {
            mem_lock<int64_t> num_iter_ptr{ num_iter_mem, get_test_stream() };
            ASSERT_EQ(num_iter_ptr.data()[0], expected_num_iterations);
        }

        std::vector<float> expected(input_data.size());
        if (expected_output_data.empty()) {
            // Compute the reference analytically: expected = (x + iter) + (x * iter).
            // `unit` is the element count from `axis` inward, so (j % unit)
            // locates j within one batch/feature block.
            size_t unit = 1;
            for (size_t k = axis; k < whole_layout.size(); k++) {
                unit *= whole_layout[k].get_length();
            }

            for (size_t j = 0; j < input_data.size(); j++) {
                // NOTE(review): the divisor 4 assumes the innermost dimension
                // is 4 (true for all current callers) — confirm before adding
                // shapes with a different inner dim.
                auto val = static_cast<size_t>((j % unit) / 4) + 1;
                expected[j] = static_cast<float>(input_data[j] + val) + static_cast<float>(input_data[j] * val);
            }
        } else {
            expected = expected_output_data;
        }

        auto output_mem = outputs.begin()->second.get_memory();
        auto output_layout = output_mem->get_layout();
        ASSERT_EQ(output_layout.batch(), e_output_layout.batch());
        ASSERT_EQ(output_layout.feature(), e_output_layout.feature());
        ASSERT_EQ(output_layout.spatial(0), e_output_layout.spatial(0));
        ASSERT_EQ(output_layout.spatial(1), e_output_layout.spatial(1));
        // value check (distinct index name: the original shadowed the outer
        // shape-loop index `i` here)
        {
            mem_lock<float> output_ptr{ output_mem, get_test_stream() };
            for (size_t out_idx = 0, out_end = output_layout.count(); out_idx < out_end; ++out_idx) {
                ASSERT_FLOAT_EQ(output_ptr[out_idx], expected.at(out_idx));
            }
        }
    }
}

// One 4x4 plane of f32 test values used by the dynamic-loop tests.
std::vector<float> input_data_4_4{
    1.0f,  2.0f,  -15.f, 3.0f,
    4.0f,  -15.f, 5.0f,  6.0f,
    -15.f, 7.0f,  -15.f, 0.0f,
    0.0f,  -15.f, 0.5f,  -0.5f,
};

// The same 4x4 plane twice over (32 values) for the 2-feature input shape.
std::vector<float> input_data_2_4_4{
    1.0f,  2.0f,  -15.f, 3.0f,
    4.0f,  -15.f, 5.0f,  6.0f,
    -15.f, 7.0f,  -15.f, 0.0f,
    0.0f,  -15.f, 0.5f,  -0.5f,

    1.0f,  2.0f,  -15.f, 3.0f,
    4.0f,  -15.f, 5.0f,  6.0f,
    -15.f, 7.0f,  -15.f, 0.0f,
    0.0f,  -15.f, 0.5f,  -0.5f,
};

// Runs the trip-count-less loop helper twice in a row with different concrete
// shapes on a dynamic ({1,-1,4,4}) input, checking the loop copes with a
// shape change between inferences.
TEST(loop_gpu, support_loop_w_dynamic_input_w_various_shapes) {
    const ov::PartialShape body_shape = { 1, -1, 4, 4 };
    // axis value should be iter_num = (exit_value + 1)
    const std::vector<ov::PartialShape> runtime_shapes = {{ 1, 1, 4, 4 }, { 1, 2, 4, 4 }};
    const std::vector<std::vector<float>> inputs = { input_data_4_4, input_data_2_4_4 };
    const size_t axis = 2;
    const size_t exit_value = 3;

    // Empty reference data -> the helper computes the expected output itself.
    test_loop_gpu_wo_trip_count_w_multiple_shapes(body_shape,
                                                  runtime_shapes,
                                                  inputs,
                                                  std::vector<float>(),
                                                  axis,
                                                  exit_value);
}

0 comments on commit a65e9e4

Please sign in to comment.