Skip to content

Commit

Permalink
[GNA] Add missing metrics to GNAExecutableNetwork (openvinotoolkit#14680)

Browse files Browse the repository at this point in the history

* Added NETWORK_NAME, SUPPORTED_CONFIG_KEYS and SUPPORTED_METRICS metrics to GNAExecutableNetwork

* Added unit tests for GNAExecutableNetwork metrics

* Moved model name from GNAExecutableNetwork to GNAPlugin
  • Loading branch information
rjeziers authored and dood-apo committed Aug 24, 2023
1 parent 4532a70 commit d0b21e2
Show file tree
Hide file tree
Showing 4 changed files with 77 additions and 3 deletions.
3 changes: 2 additions & 1 deletion src/gna_plugin.cpp
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// Copyright (C) 2018-2022 Intel Corporation
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

Expand Down Expand Up @@ -647,6 +647,7 @@ void GNAPlugin::AddDebugProperties(const InferenceEngine::CNNLayerPtr layer,

void GNAPlugin::LoadNetwork(const CNNNetwork& _network) {
OV_ITT_SCOPED_TASK(itt::domains::GNAPlugin, "LoadNetwork");
_network_name = _network.getName();
std::shared_ptr<InferenceEngine::details::CNNNetworkImpl> convertedNetwork;

const auto effectiveGnaCompileTargetValue = effectiveGnaCompileTarget();
Expand Down
4 changes: 3 additions & 1 deletion src/gna_plugin.hpp
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// Copyright (C) 2018-2022 Intel Corporation
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

Expand Down Expand Up @@ -67,6 +67,8 @@ class GNAPlugin : public InferenceEngine::IInferencePlugin {
InferenceEngine::InputsDataMap inputs_data_map_; //!< Holds information about network inputs info
InferenceEngine::OutputsDataMap outputs_data_map_; //!< Holds information about network outputs data

std::string _network_name;

std::vector<InferenceEngine::IVariableStateInternal::Ptr> memoryStates;
bool trivialTopology = false;

Expand Down
4 changes: 3 additions & 1 deletion src/gna_plugin_query_api.cpp
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// Copyright (C) 2018-2022 Intel Corporation
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

Expand Down Expand Up @@ -59,6 +59,8 @@ Parameter GNAPlugin::GetMetric(const std::string& name, const std::map<std::stri
return GNADeviceHelper::GetGnaLibraryVersion();
} else if (ov::execution_devices == name) {
return decltype(ov::execution_devices)::value_type {GetName()};
} else if (ov::model_name == name) {
return _network_name;
} else {
const std::unordered_map<std::string, std::function<Parameter()>> queryApiSupported = {
{METRIC_KEY(AVAILABLE_DEVICES), [this]() {return GetAvailableDevices();}},
Expand Down
69 changes: 69 additions & 0 deletions tests/unit/gna_executable_network_metrics_test.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
// Copyright (C) 2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include "any_copy.hpp"
#include <ie_system_conf.h>
#include "ngraph_functions/builders.hpp"

#include "gna_executable_network.hpp"
#include "gna_plugin.hpp"
#include "memory/gna_memory.hpp"

using namespace ov::intel_gna;
using namespace InferenceEngine;


// Test double for GNAPlugin: swaps the device-backed memory for a host-side
// float allocator and drops the device handle, so an executable network can
// be constructed in a unit test without real GNA hardware.
// NOTE(review): relies on GNAPlugin exposing gnamem, graphCompiler and
// gnadevice to derived classes — confirm they stay protected.
class GNAPluginForNetworkMetricsTest : public GNAPlugin {
public:
    GNAPluginForNetworkMetricsTest(const std::map<std::string, std::string>& configMap) : GNAPlugin(configMap) {
        // Replace device memory with plain floating-point host memory.
        gnamem.reset(new gna_memory_float(memory::GNAFloatAllocator{}));
        // Point the graph compiler at the stubbed memory before any load.
        graphCompiler.setGNAMemoryPtr(gnamem);
        // Release the device handle so no hardware is ever touched.
        gnadevice.reset();
    }
};

// Fixture that builds a minimal MatMul model, loads it through the stubbed
// GNA plugin, and verifies a single metric of the resulting executable
// network against an expected string value.
class GnaExecutableNetworkMetricsTest : public ::testing::Test {
public:
    // Queries |metricName| on a freshly created GNAExecutableNetwork and
    // asserts that the returned value equals |expectedResult|.
    void Run(const std::string& metricName, const std::string& expectedResult) {
        ov::AnyMap gnaConfig = {
            ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::SW_EXACT),
        };
        auto testPlugin = std::make_shared<GNAPluginForNetworkMetricsTest>(ov::any_copy(gnaConfig));
        CNNNetwork network(getFunction());
        GNAExecutableNetwork executableNetwork(network, testPlugin);
        const std::string actual = executableNetwork.GetMetric(metricName);
        ASSERT_EQ(actual, expectedResult);
    }

protected:
    // Builds a single-layer model named "MatMul": Parameter x Constant -> Result.
    std::shared_ptr<ov::Model> getFunction() {
        auto input = std::make_shared<ngraph::opset8::Parameter>(net_precision, shape);
        auto weights = std::make_shared<ngraph::opset8::Constant>(net_precision, shape);
        auto matmul = std::make_shared<ngraph::opset8::MatMul>(input, weights, false, true);
        auto result = std::make_shared<ngraph::opset8::Result>(matmul);
        return std::make_shared<ov::Model>(ov::ResultVector({result}), ov::ParameterVector({input}), "MatMul");
    }

    const ngraph::element::Type net_precision = ngraph::element::f32;
    const ngraph::Shape shape = {1, 10};
};


// The NETWORK_NAME metric must report the name the ov::Model was built with.
TEST_F(GnaExecutableNetworkMetricsTest, TestNetworkName) {
    const std::string expectedName = "MatMul";
    Run(ov::model_name.name(), expectedName);
}

// SUPPORTED_PROPERTIES must return the full space-separated list of
// properties the executable network exposes.  The expected value mirrors
// exactly what the plugin reports; update it when properties are added.
TEST_F(GnaExecutableNetworkMetricsTest, TestSupportedProperties) {
    const std::string expectedProperties =
        "SUPPORTED_PROPERTIES AVAILABLE_DEVICES OPTIMAL_NUMBER_OF_INFER_REQUESTS RANGE_FOR_ASYNC_INFER_REQUESTS "
        "OPTIMIZATION_CAPABILITIES FULL_DEVICE_NAME GNA_LIBRARY_FULL_VERSION GNA_SCALE_FACTOR_PER_INPUT "
        "GNA_FIRMWARE_MODEL_IMAGE GNA_DEVICE_MODE GNA_HW_EXECUTION_TARGET GNA_HW_COMPILE_TARGET "
        "GNA_PWL_DESIGN_ALGORITHM GNA_PWL_MAX_ERROR_PERCENT PERFORMANCE_HINT INFERENCE_PRECISION_HINT "
        "PERFORMANCE_HINT_NUM_REQUESTS LOG_LEVEL EXECUTION_DEVICES";
    Run(ov::supported_properties.name(), expectedProperties);
}

0 comments on commit d0b21e2

Please sign in to comment.