Sharing mData between device and staging tensors #130

Closed · wants to merge 1 commit

4,200 changes: 2,116 additions & 2,084 deletions single_include/kompute/Kompute.hpp

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion src/OpTensorCreate.cpp
@@ -44,7 +44,7 @@ OpTensorCreate::init()
     tensor->init(this->mPhysicalDevice, this->mDevice);

     std::shared_ptr<Tensor> stagingTensor = std::make_shared<Tensor>(
-      tensor->data(), Tensor::TensorTypes::eStaging);
+      tensor->data_sp(), Tensor::TensorTypes::eStaging);

     stagingTensor->init(this->mPhysicalDevice, this->mDevice);
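For context, a minimal host-only sketch of what this one-line change achieves (illustration only, not part of the diff; the kp namespace and the single-include header listed above are assumed): constructing the staging tensor from data_sp() instead of data() means both tensors hold the same std::shared_ptr<std::vector<float>>, so writes through either one are visible through the other.

#include <kompute/Kompute.hpp> // single-include header shown above (assumed include path)
#include <cassert>
#include <memory>
#include <vector>

int main()
{
    // Device tensor; the vector overload copies the data into a fresh shared vector.
    auto tensor = std::make_shared<kp::Tensor>(std::vector<float>{ 1.0f, 2.0f, 3.0f });

    // Staging tensor built the way OpTensorCreate::init now builds it:
    // it receives the device tensor's shared_ptr rather than a copy.
    auto staging = std::make_shared<kp::Tensor>(
      tensor->data_sp(), kp::Tensor::TensorTypes::eStaging);

    // Both tensors reference the same underlying vector...
    assert(tensor->data_sp() == staging->data_sp());

    // ...so an update through one is observable through the other.
    staging->data()[0] = 42.0f;
    assert(tensor->data()[0] == 42.0f);
}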
2 changes: 1 addition & 1 deletion src/OpTensorSyncDevice.cpp
@@ -49,7 +49,7 @@ OpTensorSyncDevice::init()
     if (tensor->tensorType() == Tensor::TensorTypes::eDevice) {

         std::shared_ptr<Tensor> stagingTensor = std::make_shared<Tensor>(
-          tensor->data(), Tensor::TensorTypes::eStaging);
+          tensor->data_sp(), Tensor::TensorTypes::eStaging);

         stagingTensor->init(this->mPhysicalDevice, this->mDevice);
2 changes: 1 addition & 1 deletion src/OpTensorSyncLocal.cpp
@@ -49,7 +49,7 @@ OpTensorSyncLocal::init()
     if (tensor->tensorType() == Tensor::TensorTypes::eDevice) {

         std::shared_ptr<Tensor> stagingTensor = std::make_shared<Tensor>(
-          tensor->data(), Tensor::TensorTypes::eStaging);
+          tensor->data_sp(), Tensor::TensorTypes::eStaging);

         stagingTensor->init(this->mPhysicalDevice, this->mDevice);
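The same substitution in OpTensorSyncDevice and OpTensorSyncLocal is what lets the sync operations skip a second host-side copy: when the temporary staging tensor later memcpy's mapped GPU memory into mData->data() (see the Tensor.cpp changes below), it is filling the very vector the device tensor exposes through data(). A hedged host-only illustration of the old-versus-new behaviour (kp namespace and include path assumed; not part of the diff):

#include <kompute/Kompute.hpp> // single-include header (assumed include path)
#include <cassert>
#include <memory>
#include <vector>

int main()
{
    auto device = std::make_shared<kp::Tensor>(std::vector<float>{ 0.0f, 0.0f, 0.0f });

    // Old behaviour: data() hands back the elements, and the vector overload
    // copies them, so the staging tensor gets its own storage.
    kp::Tensor stagingCopy(device->data(), kp::Tensor::TensorTypes::eStaging);

    // New behaviour: data_sp() hands back the shared_ptr, so storage is shared.
    kp::Tensor stagingShared(device->data_sp(), kp::Tensor::TensorTypes::eStaging);

    // Stand-in for the memcpy that mapDataFromHostMemory performs after a sync.
    stagingCopy.data()[0] = 1.0f;
    stagingShared.data()[1] = 2.0f;

    assert(device->data()[0] == 0.0f); // copied storage: device tensor never sees it
    assert(device->data()[1] == 2.0f); // shared storage: device tensor sees it directly
}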
39 changes: 28 additions & 11 deletions src/Tensor.cpp
@@ -9,19 +9,29 @@ Tensor::Tensor()
     this->mTensorType = TensorTypes::eDevice;
 }

-Tensor::Tensor(const std::vector<float>& data, TensorTypes tensorType)
+Tensor::Tensor(const std::shared_ptr<std::vector<float>>& data, TensorTypes tensorType)
 {
 #if DEBUG
-    SPDLOG_DEBUG("Kompute Tensor constructor data length: {}, and type: {}",
-                 data.size(),
+    SPDLOG_DEBUG("Kompute Tensor shared_ptr constructor data length: {}, and type: {}",
+                 data->size(),
                  tensorType);
 #endif

     this->mData = data;
-    this->mShape = { static_cast<uint32_t>(data.size()) };
+    this->mShape = { static_cast<uint32_t>(data->size()) };
     this->mTensorType = tensorType;
 }

+Tensor::Tensor(const std::vector<float>& data, TensorTypes tensorType):
+  Tensor(std::make_shared<std::vector<float>>(data), tensorType)
+{}
+
+Tensor::Tensor(std::initializer_list<float> data, TensorTypes tensorType):
+  Tensor(std::make_shared<std::vector<float>>(data), tensorType)
+{}
+
 Tensor::~Tensor()
 {
     SPDLOG_DEBUG("Kompute Tensor destructor started. Type: {}",
@@ -39,8 +49,8 @@ Tensor::init(std::shared_ptr<vk::PhysicalDevice> physicalDevice,
              std::shared_ptr<vk::Device> device)
 {
     SPDLOG_DEBUG("Kompute Tensor running init with Vulkan params and num data "
-                 "elementS: {}",
-                 this->mData.size());
+                 "elements: {}",
+                 this->mData->size());

     this->mPhysicalDevice = physicalDevice;
     this->mDevice = device;
@@ -52,14 +62,21 @@ Tensor::init(std::shared_ptr<vk::PhysicalDevice> physicalDevice,

 std::vector<float>&
 Tensor::data()
 {
-    return this->mData;
+    return *this->mData;
+}
+
+std::shared_ptr<std::vector<float>>
+Tensor::data_sp()
+{
+    return this->mData;
 }

 float&
 Tensor::operator[](int index)
 {
-    return this->mData[index];
+    return this->mData->at(index);
 }

 uint64_t
@@ -95,11 +112,11 @@ Tensor::isInit()
 void
 Tensor::setData(const std::vector<float>& data)
 {
-    if (data.size() != this->mData.size()) {
+    if (data.size() != this->mData->size()) {
         throw std::runtime_error(
           "Kompute Tensor Cannot set data of different sizes");
     }
-    this->mData = data;
+    this->mData = std::make_shared<std::vector<float>>(data);
 }

 void
@@ -186,7 +203,7 @@ Tensor::mapDataFromHostMemory()
       *this->mMemory, 0, bufferSize, vk::MemoryMapFlags());
     vk::MappedMemoryRange mappedMemoryRange(*this->mMemory, 0, bufferSize);
     this->mDevice->invalidateMappedMemoryRanges(mappedMemoryRange);
-    memcpy(this->mData.data(), mapped, bufferSize);
+    memcpy(this->mData->data(), mapped, bufferSize);
     this->mDevice->unmapMemory(*this->mMemory);
 }

@@ -206,7 +223,7 @@ Tensor::mapDataIntoHostMemory()

     void* mapped = this->mDevice->mapMemory(
       *this->mMemory, 0, bufferSize, vk::MemoryMapFlags());
-    memcpy(mapped, this->mData.data(), bufferSize);
+    memcpy(mapped, this->mData->data(), bufferSize);
     vk::MappedMemoryRange mappedRange(*this->mMemory, 0, bufferSize);
     this->mDevice->flushMappedMemoryRanges(1, &mappedRange);
     this->mDevice->unmapMemory(*this->mMemory);
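Taken together, the Tensor.cpp changes keep the original vector constructor (now delegating to the shared_ptr one), add an initializer-list constructor, and expose the new data_sp() accessor alongside data(). A short usage sketch, again assuming the kp namespace and include path (not part of the diff):

#include <kompute/Kompute.hpp> // single-include header (assumed include path)
#include <cassert>
#include <memory>
#include <vector>

int main()
{
    // Existing overload: the vector is copied into a freshly allocated shared vector.
    std::vector<float> values{ 1.0f, 2.0f, 3.0f };
    kp::Tensor a(values);

    // New overload: the tensor shares ownership of the caller's vector, no copy.
    auto shared = std::make_shared<std::vector<float>>(values);
    kp::Tensor b(shared);

    // New overload: initializer list, convenient for literals.
    kp::Tensor c{ 1.0f, 2.0f, 3.0f };

    // data() still returns a reference to the elements; data_sp() exposes the
    // shared_ptr itself so another tensor (or the caller) can alias the storage.
    b.data()[0] = 10.0f;
    assert(b.data_sp() == shared);  // b aliases the caller's vector
    assert(shared->at(0) == 10.0f); // so the caller sees the write
}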
36 changes: 35 additions & 1 deletion src/include/kompute/Tensor.hpp
@@ -44,6 +44,28 @@ class Tensor
     */
     Tensor(const std::vector<float>& data,
            TensorTypes tensorType = TensorTypes::eDevice);

+    /**
+     * Constructor with a shared pointer to data provided which would be used
+     * to create the respective vulkan buffer and memory.
+     *
+     * @param data Shared pointer to a non-zero-sized vector of data that will
+     * be used by the tensor
+     * @param tensorType Type for the tensor which is of type TensorTypes
+     */
+    Tensor(const std::shared_ptr<std::vector<float>>& data,
+           TensorTypes tensorType = TensorTypes::eDevice);
+
+    /**
+     * Constructor with an initializer list which would be used to create the
+     * respective vulkan buffer and memory.
+     *
+     * @param data Initializer list of non-zero-sized data that will be used by
+     * the tensor
+     * @param tensorType Type for the tensor which is of type TensorTypes
+     */
+    Tensor(std::initializer_list<float> data,
+           TensorTypes tensorType = TensorTypes::eDevice);
+
     /**
     * Destructor which is in charge of freeing vulkan resources unless they
@@ -74,6 +96,17 @@ class Tensor
     * tensor.
     */
     std::vector<float>& data();

+    /**
+     * Returns a shared pointer to the vector of data currently contained
+     * by the Tensor. It is important to ensure that there is no out-of-sync
+     * data with the GPU memory.
+     *
+     * @return Shared pointer to the vector of elements representing the data
+     * in the tensor.
+     */
+    std::shared_ptr<std::vector<float>> data_sp();
+
     /**
     * Overrides the subscript operator to expose the underlying data's
     * subscript operator which in this case would be its underlying
@@ -178,8 +211,9 @@ class Tensor
     std::shared_ptr<vk::DeviceMemory> mMemory;
     bool mFreeMemory;

+    std::shared_ptr<std::vector<float>> mData;
+
     // -------------- ALWAYS OWNED RESOURCES
-    std::vector<float> mData;

     TensorTypes mTensorType = TensorTypes::eDevice;
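One behavioural note that follows from the setData() change in Tensor.cpp: it wraps the incoming data in a brand-new shared vector rather than writing into the existing one, so anything still holding the previous shared_ptr keeps seeing the old storage. A hedged sketch of that effect, and of the out-of-sync caveat mentioned in the data_sp() documentation (kp namespace and include path assumed; not part of the diff):

#include <kompute/Kompute.hpp> // single-include header (assumed include path)
#include <cassert>
#include <memory>
#include <vector>

int main()
{
    auto device = std::make_shared<kp::Tensor>(std::vector<float>{ 1.0f, 2.0f, 3.0f });
    kp::Tensor staging(device->data_sp(), kp::Tensor::TensorTypes::eStaging);

    // Element-wise writes go through the shared vector, so sharing holds...
    (*device)[0] = 9.0f;
    assert(staging.data()[0] == 9.0f);

    // ...but setData() allocates a new vector for the device tensor, so the
    // staging tensor still aliases the old storage from this point on.
    device->setData({ 7.0f, 8.0f, 9.0f });
    assert(device->data()[0] == 7.0f);
    assert(staging.data()[0] == 9.0f);              // unchanged: old vector
    assert(device->data_sp() != staging.data_sp()); // no longer shared

    // The host-side vector is also only a mirror of GPU memory: it is only as
    // fresh as the last sync back to the host (OpTensorSyncLocal /
    // mapDataFromHostMemory above).
}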