@@ -9809,7 +9809,9 @@ using TestParamType_convolution_gpu_onednn = ::testing::tuple< int, // 0 - I
         int,            // 10 - Batch
         format,         // 11 - Input data format
         std::string,    // 12 - Implementation name
-        bool>;          // 13 - With bias
+        bool,           // 13 - With bias
+        format,         // 14 - Expected convolution format
+        bool>;          // 15 - Is dynamic
 
 struct convolution_gpu_onednn : public ::testing::TestWithParam<TestParamType_convolution_gpu_onednn> {
     static std::string PrintToStringParamName(
@@ -9827,7 +9829,9 @@ struct convolution_gpu_onednn : public ::testing::TestWithParam<TestParamType_co
             std::to_string(testing::get<9>(param_info.param)) + "_batch" +
             std::to_string(testing::get<10>(param_info.param)) + "_format" +
             std::to_string(testing::get<11>(param_info.param)) + "_with_bias_" +
-            std::to_string(testing::get<13>(param_info.param));
+            std::to_string(testing::get<13>(param_info.param)) + "_conv_format_" +
+            std::to_string(testing::get<14>(param_info.param)) + "_is_dynamic_" +
+            std::to_string(testing::get<15>(param_info.param));
 
         if (testing::get<12>(param_info.param) != "") {
             res += "_kernel_" + testing::get<12>(param_info.param);
@@ -9842,11 +9846,11 @@ INSTANTIATE_TEST_SUITE_P(conv_onednn_cases,
                         ::testing::Values(
                             // Input X size, Input Y size, Input Z size, Input features, Output features,
                             // Kernel size X, Kernel size Y, Kernel size Z, Groups number, Stride, Batch,
-                            // Input data format, Implementation name, WithBias
-                            TestParamType_convolution_gpu_onednn(8, 8, 1, 32, 32, 3, 3, 1, 1, 1, 32, format::bfyx, "", true),
-                            TestParamType_convolution_gpu_onednn(8, 8, 1, 32, 32, 3, 3, 1, 1, 1, 32, format::bfyx, "", false)
-                            // TestParamType_convolution_gpu_onednn(8, 8, 1, 32, 32, 3, 3, 1, 1, 1, 32, format::bfyx, "", true ),
-                            // TestParamType_convolution_gpu_onednn(8, 8, 1, 32, 32, 3, 3, 1, 1, 1, 32, format::bfyx, "", false)
+                            // Input data format, Implementation name, WithBias, Expected Conv format, Is-dynamic
+                            TestParamType_convolution_gpu_onednn(8, 8, 1, 32, 32, 3, 3, 1, 1, 1, 32, format::bfyx, "", true, format::bs_fs_yx_bsv32_fsv16, false),
+                            TestParamType_convolution_gpu_onednn(8, 8, 1, 32, 32, 3, 3, 1, 1, 1, 32, format::bfyx, "", true, format::byxf, true),
+                            TestParamType_convolution_gpu_onednn(8, 8, 1, 32, 32, 3, 3, 1, 1, 1, 32, format::bfyx, "", false, format::bs_fs_yx_bsv32_fsv16, false),
+                            TestParamType_convolution_gpu_onednn(8, 8, 1, 32, 32, 3, 3, 1, 1, 1, 32, format::bfyx, "", false, format::byxf, true)
                         ),
                         convolution_gpu_onednn::PrintToStringParamName);
 
@@ -9876,11 +9880,25 @@ TEST_P(convolution_gpu_onednn, conv_onednn_cases) {
     auto input_data_format = testing::get<11>(GetParam());
     auto impl_name = testing::get<12>(GetParam());
     auto with_bias = testing::get<13>(GetParam());
+    auto expected_conv_format = testing::get<14>(GetParam());
+    auto is_dynamic = testing::get<15>(GetParam());
+
+    ov::PartialShape target_pshape = {batch_num, input_f, input_x, input_y};
+    ov::PartialShape input_pshape;
+
+    if (is_dynamic) {
+        for (size_t i = 0; i < target_pshape.size(); ++i) {
+            input_pshape.emplace_back(ov::Dimension());
+        }
+        input_pshape[1] = target_pshape[1];
+    } else {
+        input_pshape = target_pshape;
+    }
+    layout in_layout{input_pshape, data_types::f16, format::bfyx};
 
-    auto input_size = tensor(batch_num, input_f, input_x, input_y);
     auto input_data = rg.generate_random_4d<ov::float16>(batch_num, input_f, input_y, input_x, -1, 1);
     auto input_data_bfyx = flatten_4d(format::bfyx, input_data);
-    auto input_mem = engine.allocate_memory({ data_types::f16, format::bfyx, input_size });
+    auto input_mem = engine.allocate_memory({ target_pshape, data_types::f16, format::bfyx });
     set_values(input_mem, input_data_bfyx);
 
     auto weights_size = tensor(output_f, input_f, filter_y, filter_x, 1);
@@ -9912,10 +9930,10 @@ TEST_P(convolution_gpu_onednn, conv_onednn_cases) {
             }
         }
 
-        topology.add(input_layout("input", input_mem->get_layout() ),
+        topology.add(input_layout("input", in_layout),
                      data("weights_fsv", weights_mem),
                      data("bias", biases_mem),
-                     reorder("input_fsv", input_info("input"), { data_types::f16, input_data_format, input_size } ));
+                     reorder("input_fsv", input_info("input"), input_data_format, data_types::f16));
 
         auto conv_fsv = convolution("conv_fsv",
                                     input_info("input_fsv"),
@@ -9943,9 +9961,9 @@ TEST_P(convolution_gpu_onednn, conv_onednn_cases) {
             }
         }
 
-        topology.add(input_layout("input", input_mem->get_layout() ),
+        topology.add(input_layout("input", in_layout),
                      data("weights_fsv", weights_mem),
-                     reorder("input_fsv", input_info("input"), { data_types::f16, input_data_format, input_size } ));
+                     reorder("input_fsv", input_info("input"), input_data_format, data_types::f16));
 
         auto conv_fsv = convolution("conv_fsv",
                                     input_info("input_fsv"),
@@ -9964,6 +9982,8 @@ TEST_P(convolution_gpu_onednn, conv_onednn_cases) {
     ExecutionConfig config = get_test_default_config(engine);
     config.set_property(ov::intel_gpu::optimize_data(true));
     config.set_property(ov::intel_gpu::custom_outputs(std::vector<std::string>{"conv_fsv","reorder_bfyx"}));
+    if (is_dynamic)
+        config.set_property(ov::intel_gpu::allow_new_shape_infer(true));
     network network(engine, topology, config);
 
     network.set_input_data("input", input_mem);
@@ -9974,8 +9994,10 @@ TEST_P(convolution_gpu_onednn, conv_onednn_cases) {
     for (auto& p : network.get_primitives_info())
         std::cerr << p.original_id << " " << p.kernel_id << std::endl;
 
-    auto out_ptr = get_output_values_to_float<ov::float16>(network, outputs.find("conv_fsv")->second);
-    auto out_lay = network.get_primitive("conv_fsv")->get_node_output_layout();
+    auto out_ptr = get_output_values_to_float<float>(network, outputs.find("reorder_bfyx")->second);
+    auto output_memory = outputs.at("reorder_bfyx").get_memory();
+    auto out_lay = output_memory->get_layout();
+
     ASSERT_EQ(out_lay.batch(), expected_result.size());
     ASSERT_EQ(out_lay.feature(), expected_result[0].size());
     ASSERT_EQ(out_lay.spatial(1), expected_result[0][0].size());
@@ -9998,6 +10020,9 @@ TEST_P(convolution_gpu_onednn, conv_onednn_cases) {
                 }
                 ASSERT_TRUE(equal);
             }
+
+    out_lay = network.get_primitive("conv_fsv")->get_node_output_layout();
+    ASSERT_EQ(out_lay.get_format(), expected_conv_format);
 }
 
 TEST(convolution_gpu_onednn, padding_for_cldnn_kernel_after_onednn) {
@@ -10174,6 +10199,81 @@ TEST(convolution_gpu_onednn, spatial_1d) {
     }
 }
 
+TEST(convolution_gpu_onednn, spatial_1d_dynamic) {
+    auto& engine = get_test_engine();
+    if (!engine.get_device_info().supports_immad)
+        return;
+
+    tests::random_generator rg(GET_SUITE_NAME);
+    ov::PartialShape target_pshape = {1, 16, 6};
+    ov::PartialShape input_pshape = {ov::Dimension(), 16, 6};
+    ov::PartialShape weights_pshape = {16, 16, 3};
+    layout in_layout{ input_pshape, data_types::f16, format::bfyx };
+    layout in_ref_layout{ target_pshape, data_types::f16, format::bfyx };
+    layout weights_layout{ weights_pshape, data_types::f16, format::bfyx };
+    auto input_data = rg.generate_random_1d<ov::float16>(ov::shape_size(target_pshape.get_shape()), -1, 1);
+    auto input_mem = engine.allocate_memory({target_pshape, data_types::f16, format::bfyx});
+    set_values(input_mem, input_data);
+
+    auto weights_data = rg.generate_random_1d<ov::float16>(weights_layout.count(), -1, 1);
+    auto weights_mem = engine.allocate_memory(weights_layout);
+    set_values(weights_mem, weights_data);
+
+    auto input = input_layout("input", in_layout);
+    auto input_ref = input_layout("input", in_ref_layout);
+    auto weights = data("weights", weights_mem);
+    auto conv = convolution("conv",
+                            input_info("input"),
+                            "weights",
+                            no_bias,
+                            1,
+                            ov::Strides{1},
+                            ov::Strides{1},
+                            ov::CoordinateDiff{0},
+                            ov::CoordinateDiff{0},
+                            false);
+    auto output_reorder = reorder("reorder", input_info("conv"), format::bfyx, data_types::f32);
+
+    topology t(input, weights, conv, output_reorder);
+    topology t_ref(input_ref, weights, conv, output_reorder);
+
+    ExecutionConfig config_test_dynamic = get_test_default_config(engine);
+    config_test_dynamic.set_property(ov::intel_gpu::optimize_data(true));
+    config_test_dynamic.set_property(ov::intel_gpu::allow_new_shape_infer(true));
+
+    ExecutionConfig config_ref = get_test_default_config(engine);
+    ov::intel_gpu::ImplementationDesc conv_impl_ref = { format::bfyx, "", impl_types::ocl };
+    config_ref.set_property(ov::intel_gpu::force_implementations(ov::intel_gpu::ImplForcingMap{{ "conv", conv_impl_ref }}));
+    config_ref.set_property(ov::intel_gpu::optimize_data(true));
+    config_ref.set_property(ov::intel_gpu::allow_new_shape_infer(true));
+
+    network network_ref(engine, t_ref, config_ref);
+    network_ref.set_input_data("input", input_mem);
+    auto outputs_ref = network_ref.execute();
+    ASSERT_EQ(outputs_ref.size(), size_t(1));
+    ASSERT_EQ(outputs_ref.begin()->first, "reorder");
+    auto output_memory_ref = outputs_ref.at("reorder").get_memory();
+    auto output_layout_ref = output_memory_ref->get_layout();
+    cldnn::mem_lock<float> output_ptr_ref(output_memory_ref, get_test_stream());
+
+    network network_test_dynamic(engine, t, config_test_dynamic);
+    network_test_dynamic.set_input_data("input", input_mem);
+    auto outputs_test_dynamic = network_test_dynamic.execute();
+    ASSERT_EQ(outputs_test_dynamic.size(), size_t(1));
+    ASSERT_EQ(outputs_test_dynamic.begin()->first, "reorder");
+    auto output_memory_test_dynamic = outputs_test_dynamic.at("reorder").get_memory();
+    auto output_layout_test_dynamic = output_memory_test_dynamic->get_layout();
+    cldnn::mem_lock<float> output_ptr_test_dynamic(output_memory_test_dynamic, get_test_stream());
+
+    ov::PartialShape expected_shape = {1, 16, 4};
+    ASSERT_EQ(output_layout_test_dynamic.get_partial_shape(), expected_shape);
+    ASSERT_EQ(output_layout_ref.get_partial_shape(), expected_shape);
+
+    for (size_t i = 0; i < output_memory_ref->count(); i++) {
+        ASSERT_EQ(output_ptr_ref.data()[i], output_ptr_test_dynamic.data()[i]);
+    }
+}
+
 TEST(convolution_gpu_onednn, spatial_1d_quantize_post_ops_blocked_format) {
     auto& engine = get_test_engine();
     if (!engine.get_device_info().supports_immad)