Use same design for shared ptr
pereanub committed Nov 7, 2024
1 parent 884bdf7 commit b3ea642
Showing 12 changed files with 24 additions and 22 deletions.
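
The commit applies one convention across the plugin: shared_ptr parameters that a callee does not unconditionally take ownership of are passed as const std::shared_ptr<T>& rather than by value, avoiding an atomic reference-count increment and decrement on every call. The sketch below is a minimal, standalone illustration of that trade-off, not code from this repository; Widget, observe and Holder are made-up names.

#include <memory>
#include <utility>

struct Widget {               // illustrative type, not from the repository
    int value = 0;
};

// Pass by const reference: no reference-count bump at the call site.
// The callee can still copy the pointer if it decides to keep it.
void observe(const std::shared_ptr<Widget>& w) {
    if (w) {
        (void)w->value;       // read-only use, no ownership taken
    }
}

// Pass by value plus std::move: appropriate only when the callee always
// stores the pointer, since the single copy happens at the call site.
class Holder {
public:
    explicit Holder(std::shared_ptr<Widget> w) : _w(std::move(w)) {}
private:
    std::shared_ptr<Widget> _w;
};

int main() {
    auto w = std::make_shared<Widget>();
    observe(w);   // no atomic increment here
    Holder h{w};  // one copy into the parameter, then moved into the member
    return 0;
}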

@@ -13,8 +13,8 @@ namespace intel_npu {

class ZeroHostTensor : public ov::ITensor {
public:
- ZeroHostTensor(std::shared_ptr<ov::IRemoteContext> context,
- std::shared_ptr<ZeroInitStructsHolder> init_structs,
+ ZeroHostTensor(const std::shared_ptr<ov::IRemoteContext>& context,
+ const std::shared_ptr<ZeroInitStructsHolder>& init_structs,
const ov::element::Type element_type,
const ov::Shape& shape,
const Config& config);

@@ -63,15 +63,17 @@ class ZeroInferRequest final : public SyncInferRequest {
* @param index The index corresponding to the position of the tensor inside the I/O structures.
* @param isInput Used for identifying the structures to which the tensor belongs.
*/
- void set_tensor_data(const std::shared_ptr<ov::ITensor> tensor, const size_t index, const bool isInput);
+ void set_tensor_data(const std::shared_ptr<ov::ITensor>& tensor, const size_t index, const bool isInput);

/**
* @brief Check the received remote tensor and copy it to the Level Zero tensor
* @param tensor Reference to a tensor.
* @param index The index corresponding to the position of the tensor inside the I/O structures.
* @param isInput Used for identifying the structures to which the tensor belongs.
*/
- void set_remote_tensor_data(const std::shared_ptr<ZeroRemoteTensor> tensor, const size_t index, const bool isInput);
+ void set_remote_tensor_data(const std::shared_ptr<ZeroRemoteTensor>& tensor,
+ const size_t index,
+ const bool isInput);

void check_network_precision(const ov::element::Type_t precision) const override;
void create_pipeline();

@@ -25,7 +25,7 @@ struct Pipeline {
const std::shared_ptr<IGraph>& graph,
zeroProfiling::ProfilingPool& profiling_pool,
zeroProfiling::ProfilingQuery& profiling_query,
- std::shared_ptr<zeroProfiling::NpuInferProfiling> npu_profiling,
+ const std::shared_ptr<zeroProfiling::NpuInferProfiling>& npu_profiling,
const std::vector<std::vector<std::optional<TensorData>>>& inputTensorsData,
const std::vector<std::optional<TensorData>>& outputTensorsData,
size_t numberOfCommandLists,

@@ -16,8 +16,8 @@ namespace intel_npu {

class ZeroRemoteTensor : public RemoteTensor {
public:
- ZeroRemoteTensor(std::shared_ptr<ov::IRemoteContext> context,
- std::shared_ptr<ZeroInitStructsHolder> init_structs,
+ ZeroRemoteTensor(const std::shared_ptr<ov::IRemoteContext>& context,
+ const std::shared_ptr<ZeroInitStructsHolder>& init_structs,
const ov::element::Type& element_type,
const ov::Shape& shape,
const Config& config,

4 changes: 2 additions & 2 deletions src/plugins/intel_npu/src/backend/src/zero_host_tensor.cpp
@@ -8,8 +8,8 @@

namespace intel_npu {

- ZeroHostTensor::ZeroHostTensor(std::shared_ptr<ov::IRemoteContext> context,
- std::shared_ptr<ZeroInitStructsHolder> init_structs,
+ ZeroHostTensor::ZeroHostTensor(const std::shared_ptr<ov::IRemoteContext>& context,
+ const std::shared_ptr<ZeroInitStructsHolder>& init_structs,
const ov::element::Type element_type,
const ov::Shape& shape,
const Config& config)

4 changes: 2 additions & 2 deletions src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp
@@ -296,7 +296,7 @@ void ZeroInferRequest::create_pipeline() {
_logger.debug("ZeroInferRequest::create_pipeline - SyncInferRequest completed");
}

- void ZeroInferRequest::set_tensor_data(const std::shared_ptr<ov::ITensor> tensor,
+ void ZeroInferRequest::set_tensor_data(const std::shared_ptr<ov::ITensor>& tensor,
const size_t index,
const bool isInput) {
OV_ITT_TASK_CHAIN(ZERO_SET_TENSOR, itt::domains::LevelZeroBackend, "set_tensor", "set_tensor_data");

@@ -347,7 +347,7 @@ void ZeroInferRequest::set_tensor_data(const std::shared_ptr<ov::ITensor> tensor
}
}

- void ZeroInferRequest::set_remote_tensor_data(const std::shared_ptr<ZeroRemoteTensor> tensor,
+ void ZeroInferRequest::set_remote_tensor_data(const std::shared_ptr<ZeroRemoteTensor>& tensor,
const size_t index,
const bool isInput) {
OV_ITT_TASK_CHAIN(ZERO_SET_REMOTE_TENSOR, itt::domains::LevelZeroBackend, "set_tensor", "set_remote_tensor_data");

4 changes: 2 additions & 2 deletions src/plugins/intel_npu/src/backend/src/zero_pipeline.cpp
@@ -20,7 +20,7 @@ Pipeline::Pipeline(const Config& config,
const std::shared_ptr<IGraph>& graph,
zeroProfiling::ProfilingPool& profiling_pool,
zeroProfiling::ProfilingQuery& profiling_query,
- std::shared_ptr<zeroProfiling::NpuInferProfiling> npu_profiling,
+ const std::shared_ptr<zeroProfiling::NpuInferProfiling>& npu_profiling,
const std::vector<std::vector<std::optional<TensorData>>>& inputTensorsData,
const std::vector<std::optional<TensorData>>& outputTensorsData,
size_t numberOfCommandLists,

@@ -30,7 +30,7 @@ Pipeline::Pipeline(const Config& config,
_event_pool{initStructs->getDevice(),
initStructs->getContext(),
numberOfCommandLists ? static_cast<uint32_t>(numberOfCommandLists) : 1},
- _npu_profiling(std::move(npu_profiling)),
+ _npu_profiling(npu_profiling),
_logger("Pipeline", _config.get<LOG_LEVEL>()) {
OV_ITT_SCOPED_TASK(itt::domains::LevelZeroBackend, "Zero_infer_request::Pipeline::Pipeline");
_logger.debug("Pipeline - initialize started");

8 changes: 4 additions & 4 deletions src/plugins/intel_npu/src/backend/src/zero_remote_tensor.cpp
@@ -21,18 +21,18 @@ constexpr std::size_t STANDARD_PAGE_SIZE = 4096;

namespace intel_npu {

- ZeroRemoteTensor::ZeroRemoteTensor(std::shared_ptr<ov::IRemoteContext> context,
- std::shared_ptr<ZeroInitStructsHolder> init_structs,
+ ZeroRemoteTensor::ZeroRemoteTensor(const std::shared_ptr<ov::IRemoteContext>& context,
+ const std::shared_ptr<ZeroInitStructsHolder>& init_structs,
const ov::element::Type& element_type,
const ov::Shape& shape,
const Config& config,
TensorType tensor_type,
MemType mem_type,
void* mem)
- : RemoteTensor(std::move(context), element_type, shape),
+ : RemoteTensor(context, element_type, shape),
_config(config),
_logger("ZeroRemoteContext", _config.get<LOG_LEVEL>()),
- _init_structs(std::move(init_structs)),
+ _init_structs(init_structs),
_tensor_type(tensor_type),
_mem_type(mem_type),
_mem(mem) {

@@ -20,7 +20,7 @@ namespace intel_npu {
*/
class RemoteTensor : public ov::IRemoteTensor {
public:
- RemoteTensor(std::shared_ptr<ov::IRemoteContext> context,
+ RemoteTensor(const std::shared_ptr<ov::IRemoteContext>& context,
const ov::element::Type& element_type,
const ov::Shape& shape);


4 changes: 2 additions & 2 deletions src/plugins/intel_npu/src/common/src/remote_tensor.cpp
@@ -10,10 +10,10 @@

namespace intel_npu {

- RemoteTensor::RemoteTensor(std::shared_ptr<ov::IRemoteContext> context,
+ RemoteTensor::RemoteTensor(const std::shared_ptr<ov::IRemoteContext>& context,
const ov::element::Type& element_type,
const ov::Shape& shape)
- : _context(std::move(context)),
+ : _context(context),
_element_type(element_type),
_shape(shape),
_capacity(shape) {

@@ -16,7 +16,7 @@ namespace intel_npu {

class RemoteContextImpl : public ov::IRemoteContext {
public:
- RemoteContextImpl(std::shared_ptr<const NPUBackends> backends, const Config& config);
+ RemoteContextImpl(const std::shared_ptr<const NPUBackends>& backends, const Config& config);

/**
* @brief Returns name of a device on which underlying object is allocated.

2 changes: 1 addition & 1 deletion src/plugins/intel_npu/src/plugin/src/remote_context.cpp
@@ -29,7 +29,7 @@ std::optional<Type> extract_object(const ov::AnyMap& params, const ov::Property<

namespace intel_npu {

- RemoteContextImpl::RemoteContextImpl(std::shared_ptr<const NPUBackends> backends, const Config& config)
+ RemoteContextImpl::RemoteContextImpl(const std::shared_ptr<const NPUBackends>& backends, const Config& config)
: _backends(backends),
_config(config),
_properties({l0_context(backends->getContext())}),