Python type mismatch #1082

Merged (31 commits, Sep 25, 2024)
Changes shown from 28 of 31 commits.

Commits:
3784788 - reversed python binding order (borbrudar, Jul 31, 2024)
1a4b8ba - addTensor now takes an additional (optional) parameter (borbrudar, Jul 31, 2024)
32b8ea1 - reworked nndata, bindings dont work yet (borbrudar, Jul 31, 2024)
5d8eeeb - template not needed (borbrudar, Jul 31, 2024)
59202d4 - further template rework (borbrudar, Jul 31, 2024)
5f4dba6 - Merge branch 'auto-reconnect' into v3_develop (borbrudar, Aug 1, 2024)
534b122 - compiles at least (borbrudar, Aug 1, 2024)
3d08053 - mvp (borbrudar, Aug 1, 2024)
d29b922 - Merge branch 'luxonis:v3_develop' into v3_develop (borbrudar, Sep 16, 2024)
c1e3cde - added switch (borbrudar, Sep 16, 2024)
69b8c0a - Delete _3rdParty/Hunter directory (borbrudar, Sep 16, 2024)
1a4c1e3 - Delete CMakeFiles directory (borbrudar, Sep 16, 2024)
584154f - Delete generated directory (borbrudar, Sep 16, 2024)
9ec071c - removed erronous dependency (borbrudar, Sep 16, 2024)
18c1ecf - added dynamic dispatch instead of many static functions (borbrudar, Sep 17, 2024)
a4cb609 - made function binding more explicit (borbrudar, Sep 17, 2024)
48db165 - python arrays implicitly converted to work (borbrudar, Sep 17, 2024)
98d4afc - added C++ addTensor overloads (borbrudar, Sep 17, 2024)
cec87c4 - added fp64 support (borbrudar, Sep 17, 2024)
b330336 - added fp64 binding (borbrudar, Sep 18, 2024)
6dd14ac - fixed fp16 type conversion (borbrudar, Sep 18, 2024)
de1fb34 - removed addTensor overloads (borbrudar, Sep 18, 2024)
945f0d6 - removed debug statements (borbrudar, Sep 18, 2024)
aea2f7a - added std::vector overloads (borbrudar, Sep 18, 2024)
0e3daa6 - defaulting doubles to fp32 (borbrudar, Sep 18, 2024)
3eb3d25 - Merge remote-tracking branch 'origin/v3_develop' into python-type-mis… (borbrudar, Sep 18, 2024)
d1d8125 - reordered (borbrudar, Sep 18, 2024)
f275e55 - added tests (borbrudar, Sep 18, 2024)
b062bd7 - readded ifdefs (borbrudar, Sep 19, 2024)
62dea76 - added tests (borbrudar, Sep 19, 2024)
f03c5f7 - reintroduced templated arguments for backwards compatibility, tests pass (borbrudar, Sep 19, 2024)
53 changes: 44 additions & 9 deletions bindings/python/src/pipeline/datatype/NNDataBindings.cpp
@@ -10,11 +10,14 @@
//pybind
#include <pybind11/chrono.h>
#include <pybind11/numpy.h>
#include <pybind11/stl.h>

//xtensor
#define FORCE_IMPORT_ARRAY
#include <xtensor-python/pyarray.hpp>

#include "fp16/fp16.h"

// #include "spdlog/spdlog.h"

void bind_nndata(pybind11::module& m, void* pCallstack){
@@ -90,6 +93,7 @@ void bind_nndata(pybind11::module& m, void* pCallstack){
.value("INT", TensorInfo::DataType::INT)
.value("FP32", TensorInfo::DataType::FP32)
.value("I8", TensorInfo::DataType::I8)
.value("FP64", TensorInfo::DataType::FP64)
;

tensorInfoStorageOrder
@@ -182,26 +186,57 @@
// .def("setTimestampDevice", &NNData::setTimestampDevice, DOC(dai, NNData, setTimestampDevice))
// .def("setSequenceNum", &NNData::setSequenceNum, DOC(dai, NNData, setSequenceNum))

.def("addTensor", static_cast<NNData&(NNData::*)(const std::string&, const std::vector<int>&)>(&NNData::addTensor), py::arg("name"), py::arg("tensor"), DOC(dai, NNData, addTensor))
.def("addTensor", static_cast<NNData&(NNData::*)(const std::string&, const std::vector<float>&)>(&NNData::addTensor), py::arg("name"), py::arg("tensor"), DOC(dai, NNData, addTensor))
.def("addTensor", static_cast<NNData&(NNData::*)(const std::string&, const std::vector<double>&)>(&NNData::addTensor), py::arg("name"), py::arg("tensor"), DOC(dai, NNData, addTensor))

.def("addTensor", static_cast<NNData&(NNData::*)(const std::string&, const std::vector<int>&, TensorInfo::StorageOrder)>(&NNData::addTensor), py::arg("name"), py::arg("tensor"), py::arg("storageOrder"), DOC(dai, NNData, addTensor))
.def("addTensor", static_cast<NNData&(NNData::*)(const std::string&, const std::vector<float>&, TensorInfo::StorageOrder)>(&NNData::addTensor), py::arg("name"), py::arg("tensor"), py::arg("storageOrder"), DOC(dai, NNData, addTensor))
.def("addTensor", static_cast<NNData&(NNData::*)(const std::string&, const std::vector<double>&, TensorInfo::StorageOrder)>(&NNData::addTensor), py::arg("name"), py::arg("tensor"), py::arg("storageOrder"), DOC(dai, NNData, addTensor))

.def("addTensor", static_cast<NNData&(NNData::*)(const std::string&, const xt::xarray<int>&)>(&NNData::addTensor), py::arg("name"), py::arg("tensor"), DOC(dai, NNData, addTensor, 2))
.def("addTensor", static_cast<NNData&(NNData::*)(const std::string&, const xt::xarray<float>&)>(&NNData::addTensor), py::arg("name"), py::arg("tensor"), DOC(dai, NNData, addTensor, 2))
.def("addTensor", static_cast<NNData&(NNData::*)(const std::string&, const xt::xarray<double>&)>(&NNData::addTensor), py::arg("name"), py::arg("tensor"), DOC(dai, NNData, addTensor, 2))

.def("addTensor", static_cast<NNData&(NNData::*)(const std::string&, const xt::xarray<int>&, TensorInfo::StorageOrder)>(&NNData::addTensor), py::arg("name"), py::arg("tensor"), py::arg("storageOrder"), DOC(dai, NNData, addTensor, 2))
.def("addTensor", static_cast<NNData&(NNData::*)(const std::string&, const xt::xarray<float>&, TensorInfo::StorageOrder)>(&NNData::addTensor), py::arg("name"), py::arg("tensor"), py::arg("storageOrder"), DOC(dai, NNData, addTensor, 2))
.def("addTensor", static_cast<NNData&(NNData::*)(const std::string&, const xt::xarray<double>&, TensorInfo::StorageOrder)>(&NNData::addTensor), py::arg("name"), py::arg("tensor"), py::arg("storageOrder"), DOC(dai, NNData, addTensor, 2))

.def("addTensor", [](NNData&obj, const std::string&name, py::object tensor_obj, dai::TensorInfo::DataType dataType){
auto tensor = py::array(tensor_obj);
if (dataType == dai::TensorInfo::DataType::INT)
obj.addTensor<int>(name, tensor.cast<xt::xarray<int>>(), dai::TensorInfo::DataType::INT);
else if (dataType == dai::TensorInfo::DataType::FP32)
obj.addTensor<float>(name, tensor.cast<xt::xarray<float>>(), dai::TensorInfo::DataType::FP32);
else if(dataType == dai::TensorInfo::DataType::FP64)
obj.addTensor<double>(name, tensor.cast<xt::xarray<double>>(), dai::TensorInfo::DataType::FP64);
else if(dataType == dai::TensorInfo::DataType::FP16)
obj.addTensor<float>(name, tensor.cast<xt::xarray<float>>(), dai::TensorInfo::DataType::FP16);
else if (dataType == dai::TensorInfo::DataType::U8F)
obj.addTensor<uint8_t>(name, tensor.cast<xt::xarray<uint8_t>>(), dai::TensorInfo::DataType::U8F);
else if (dataType == dai::TensorInfo::DataType::I8)
obj.addTensor<int8_t>(name, tensor.cast<xt::xarray<int8_t>>(), dai::TensorInfo::DataType::I8);
else throw std::runtime_error("Unsupported datatype");
}, py::arg("name"), py::arg("tensor"), py::arg("dataType"), DOC(dai, NNData, addTensor))

.def("addTensor", [](NNData&obj, const std::string &name, py::object tensor_obj){
auto tensor = py::array(tensor_obj);
auto dtype = tensor.dtype();
if (dtype.is(py::dtype::of<float>()) || dtype.is(py::dtype::of<double>())) {
obj.addTensor<float>(name, tensor.cast<xt::xarray<float>>(), dai::TensorInfo::DataType::FP32);
} else if (dtype.is(py::dtype::of<int>()) || dtype.is(py::dtype::of<int64_t>()) ) {
obj.addTensor<int>(name, tensor.cast<xt::xarray<int>>(), dai::TensorInfo::DataType::INT);
} else if (dtype.is(py::dtype("float16"))) {
obj.addTensor<float>(name, tensor.cast<xt::xarray<float>>(), dai::TensorInfo::DataType::FP16);
} else if(dtype.is(py::dtype::of<int8_t>())){
obj.addTensor<int8_t>(name, tensor.cast<xt::xarray<int8_t>>(), dai::TensorInfo::DataType::I8);
} else if(dtype.is(py::dtype::of<uint8_t>())){
obj.addTensor<uint8_t>(name, tensor.cast<xt::xarray<uint8_t>>(), dai::TensorInfo::DataType::U8F);
} else throw std::runtime_error("Unsupported object type");
}, py::arg("name"), py::arg("tensor"), DOC(dai, NNData, addTensor, 2))


.def("getTensor", [](NNData& obj, const std::string& name, bool dequantize) -> py::object {
const auto datatype = obj.getTensorDatatype(name);
if((datatype == dai::TensorInfo::DataType::U8F && !dequantize) ||
(datatype == dai::TensorInfo::DataType::I8 && !dequantize) ||
(datatype == dai::TensorInfo::DataType::INT && !dequantize)) {
// Integer-typed tensors are returned as int unless dequantization is requested; dequantized output is always float
return py::cast(obj.getTensor<int>(name));
} else if(datatype == dai::TensorInfo::DataType::FP64) {
return py::cast(obj.getTensor<double>(name, dequantize));
} else {
return py::cast(obj.getTensor<float>(name, dequantize));
}
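With the widened condition, integer-typed tensors now come back as integer arrays whenever dequantization is not requested. A short sketch continuing from the example above, assuming the collapsed py::arg list exposes the flag as a dequantize keyword:

    raw = nndata.getTensor("i8")                   # integral datatypes stay integral by default
    deq = nndata.getTensor("i8", dequantize=True)  # dequantized output is always float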
24 changes: 16 additions & 8 deletions bindings/python/tests/nndata_tensor_test.py
@@ -4,28 +4,36 @@

def test_nndata_tensor():
nndata = dai.NNData()

tensorA = np.random.rand(3,3,3,3)
tensorB = np.random.randint(2**8, size=(3,3,3,3))
tensorC = [[1,5],[1,1],[2,3], [1,2]]
tensorD = [1.1, 2.1, 3.1, 4.1, 5.1]
tensorE = ["string", "string2"]

nndata.addTensor("a", tensorA)
nndata.addTensor("a", tensorA.astype(np.float16))
nndata.addTensor("b", tensorB)
nndata.addTensor("c", tensorC)
nndata.addTensor("d", tensorD)
nndata.addTensor("d", np.array(tensorD).astype(np.float32))
nndata.addTensor("dd", tensorD)
#nndata.addTensor("e", tensorE) # This should fail
nndata.addTensor("f", tensorB.astype(np.float16))
nndata.addTensor("g", tensorA)

assert(nndata.getTensorDatatype("a") == dai.TensorInfo.DataType.FP16)
assert(nndata.getTensorDatatype("b") == dai.TensorInfo.DataType.U8F)
assert(nndata.getTensorDatatype("c") == dai.TensorInfo.DataType.U8F)
assert(nndata.getTensorDatatype("d") == dai.TensorInfo.DataType.FP16)
assert(nndata.getTensorDatatype("b") == dai.TensorInfo.DataType.INT)
assert(nndata.getTensorDatatype("c") == dai.TensorInfo.DataType.INT)
assert(nndata.getTensorDatatype("d") == dai.TensorInfo.DataType.FP32)
assert(nndata.getTensorDatatype("dd") == dai.TensorInfo.DataType.FP32)
assert(nndata.getTensorDatatype("f") == dai.TensorInfo.DataType.FP16)
assert(nndata.getTensorDatatype("g") == dai.TensorInfo.DataType.FP32)

assert(np.allclose(nndata.getTensor("a"), tensorA, atol=0.002))
assert((nndata.getTensor("b") == tensorB).all())
assert((nndata.getTensor("c") == tensorC).all())
assert(np.allclose(nndata.getTensor("d"), tensorD, atol=0.002))

assert(np.allclose(nndata.getFirstTensor(), tensorA, atol=0.002))

if __name__ == '__main__':
test_nndata_tensor()
3 changes: 3 additions & 0 deletions include/depthai/common/TensorInfo.hpp
@@ -34,6 +34,7 @@ struct TensorInfo {
INT = 2, // Signed integer (4 byte)
FP32 = 3, // Single precision floating point
I8 = 4, // Signed byte
FP64 = 5, // Double precision floating point
};

void validateStorageOrder() {
@@ -83,6 +84,8 @@ struct TensorInfo {
case DataType::INT:
case DataType::FP32:
return sizeof(float);
case DataType::FP64:
return sizeof(double);
default:
return 0;
break;
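For reference, the per-element sizes implied by getDataTypeSize after this change, written out as a small table (only the cases visible in this excerpt; the FP16, U8F, and I8 cases are collapsed above):

    # Bytes per element as returned by TensorInfo::getDataTypeSize
    ELEMENT_SIZE = {
        "INT": 4,   # falls through to sizeof(float)
        "FP32": 4,
        "FP64": 8,  # newly added: sizeof(double)
    }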
125 changes: 112 additions & 13 deletions include/depthai/pipeline/datatype/NNData.hpp
@@ -198,7 +198,7 @@
*/
span<std::uint8_t> emplaceTensor(TensorInfo& tensor);

//#ifdef DEPTHAI_XTENSOR_SUPPORT
Review comment (Collaborator): I think this is still needed?

/**
* @brief Add a tensor to this NNData object.
* The provided array is stored as a 1xN tensor where N is the length of the array.
@@ -208,8 +208,47 @@
* @return NNData&: reference to this object
*/
template <typename _Ty = double>
NNData& addTensor(const std::string& name, const std::vector<_Ty>& data, dai::TensorInfo::DataType dataType) {
return addTensor<_Ty>(name, xt::adapt(data, std::vector<size_t>{1, data.size()}), dataType);
};
// addTensor vector overloads
NNData& addTensor(const std::string& name, const std::vector<int>& tensor) {
return addTensor<int>(name, tensor, dai::TensorInfo::DataType::INT);
};
NNData& addTensor(const std::string& name, const std::vector<uint16_t>& tensor) {
return addTensor<uint16_t>(name, tensor, dai::TensorInfo::DataType::FP16);
};
NNData& addTensor(const std::string& name, const std::vector<float>& tensor) {
return addTensor<float>(name, tensor, dai::TensorInfo::DataType::FP32);
};
NNData& addTensor(const std::string& name, const std::vector<double>& tensor) {
return addTensor<double>(name, tensor, dai::TensorInfo::DataType::FP64);
};
NNData& addTensor(const std::string& name, const std::vector<std::int8_t>& tensor) {
return addTensor<std::int8_t>(name, tensor, dai::TensorInfo::DataType::I8);
};
NNData& addTensor(const std::string& name, const std::vector<std::uint8_t>& tensor) {
return addTensor<std::uint8_t>(name, tensor, dai::TensorInfo::DataType::U8F);
};

// addTensor overloads
NNData& addTensor(const std::string& name, const xt::xarray<int>& tensor) {
return addTensor<int>(name, tensor, dai::TensorInfo::DataType::INT);
};
NNData& addTensor(const std::string& name, const xt::xarray<uint16_t>& tensor) {
return addTensor<uint16_t>(name, tensor, dai::TensorInfo::DataType::FP16);
};
NNData& addTensor(const std::string& name, const xt::xarray<float>& tensor) {
return addTensor<float>(name, tensor, dai::TensorInfo::DataType::FP32);
};
NNData& addTensor(const std::string& name, const xt::xarray<double>& tensor) {
return addTensor<double>(name, tensor, dai::TensorInfo::DataType::FP64);
};
NNData& addTensor(const std::string& name, const xt::xarray<std::int8_t>& tensor) {
return addTensor<std::int8_t>(name, tensor, dai::TensorInfo::DataType::I8);
};
NNData& addTensor(const std::string& name, const xt::xarray<std::uint8_t>& tensor) {
return addTensor<std::uint8_t>(name, tensor, dai::TensorInfo::DataType::U8F);
};
Review comment (Collaborator): Let's add tests for these too

Reply (Author): Added.


/**
@@ -226,6 +265,22 @@
return addTensor<_Ty>(name, xt::adapt(data, std::vector<size_t>{1, data.size()}), order);
};

/**
* @brief Add a tensor to this NNData object.
* The TensorInfo::DataType is inferred from the element type: integral types map to U8F, floating-point types to FP16.
*
* @param name: Name of the tensor
* @param data: array
* @param order: Storage order of the tensor
* @return NNData&: reference to this object
*/

template <typename _Ty = double>
NNData& addTensor(const std::string& name, const xt::xarray<_Ty>& data, TensorInfo::StorageOrder order) {
auto dataType = std::is_integral<_Ty>::value ? dai::TensorInfo::DataType::U8F : dai::TensorInfo::DataType::FP16;
return addTensor<_Ty>(name, data, dataType, order);
};
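A one-line Python mirror of that fallback, useful when reasoning about what an untyped addTensor call will store:

    import numpy as np

    def default_datatype(dtype):
        # Mirrors the C++ default: std::is_integral ? U8F : FP16
        return "U8F" if np.issubdtype(dtype, np.integer) else "FP16"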

/**
* @brief Add a tensor to this NNData object. The storage order is picked based on the number of dimensions of the tensor.
* Values are converted according to the given TensorInfo::DataType.
@@ -235,7 +290,7 @@
* @return NNData&: reference to this object
*/
template <typename _Ty = double>
NNData& addTensor(const std::string& name, const xt::xarray<_Ty>& tensor, dai::TensorInfo::DataType dataType) {
TensorInfo::StorageOrder order;
switch(tensor.shape().size()) {
case 1:
Expand All @@ -253,7 +308,7 @@ class NNData : public Buffer {
default:
throw std::runtime_error("Unsupported tensor shape. Only 1D, 2D, 3D and 4D tensors are supported");
}
return addTensor(name, tensor, dataType, order);
}

/**
@@ -266,8 +321,15 @@
* @return NNData&: reference to this object
*/
template <typename _Ty = double>
NNData& addTensor(const std::string& name, const xt::xarray<_Ty>& tensor, dai::TensorInfo::DataType dataType, const TensorInfo::StorageOrder order) {
static_assert(std::is_integral<_Ty>::value || std::is_floating_point<_Ty>::value, "Tensor type needs to be integral or floating point");
//if(dataType==dai::TensorInfo::DataType::FP32) std::cout<<"FP32\n";
//else if(dataType==dai::TensorInfo::DataType::FP16) std::cout<<"FP16\n";
//else if(dataType==dai::TensorInfo::DataType::INT) std::cout<<"INT\n";
//else if(dataType==dai::TensorInfo::DataType::I8) std::cout<<"I8\n";
//else if(dataType==dai::TensorInfo::DataType::U8F) std::cout<<"U8F\n";
//else if(dataType==dai::TensorInfo::DataType::FP64) std::cout<<"FP64\n";
//else std::cout<<"Unsupported type\n";

// Check if data is vector type of data
if(std::dynamic_pointer_cast<VectorMemory>(data) == nullptr) {
@@ -277,7 +339,23 @@
auto vecData = std::dynamic_pointer_cast<VectorMemory>(data);

// Get size in bytes of the converted tensor data, depending on the requested data type
//const size_t sConvertedData = std::is_integral<_Ty>::value ? tensor.size() : 2 * tensor.size();
size_t sConvertedData = tensor.size();
switch(dataType){
case dai::TensorInfo::DataType::FP64:
sConvertedData *= 8;
break;
case dai::TensorInfo::DataType::FP32:
case dai::TensorInfo::DataType::INT:
sConvertedData *= 4;
break;
case dai::TensorInfo::DataType::FP16:
sConvertedData *= 2;
break;
case dai::TensorInfo::DataType::U8F:
case dai::TensorInfo::DataType::I8:
break;
}

// Append bytes so that each new tensor is DATA_ALIGNMENT aligned
size_t remainder = std::distance(vecData->begin(), vecData->end()) % DATA_ALIGNMENT;
@@ -291,22 +369,38 @@
// Reserve space
vecData->resize(offset + sConvertedData);

// Convert data to appropriate data type and write to data
if(dataType == dai::TensorInfo::DataType::I8) {
for(uint32_t i = 0; i < tensor.size(); i++) {
vecData->data()[i + offset] = (int8_t)tensor.data()[i];
}
} else if(dataType == dai::TensorInfo::DataType::FP16) {
for(uint32_t i = 0; i < tensor.size(); i++) {
*(uint16_t*)(&vecData->data()[2 * i + offset]) = fp32_to_fp16(tensor.data()[i]);
}
} else if(dataType == dai::TensorInfo::DataType::FP32){
for(uint32_t i = 0; i < tensor.size(); i++) {
*(float*)(&vecData->data()[4 * i + offset]) = tensor.data()[i];
}
} else if(dataType == dai::TensorInfo::DataType::INT){
for(uint32_t i = 0; i < tensor.size(); i++) {
*(int32_t*)(&vecData->data()[4 * i + offset]) = tensor.data()[i];
}
} else if(dataType == dai::TensorInfo::DataType::U8F) {
for(uint32_t i = 0; i < tensor.size(); i++) {
vecData->data()[i + offset] = (uint8_t)tensor.data()[i];
}
} else if(dataType == dai::TensorInfo::DataType::FP64) {
for(uint32_t i = 0; i < tensor.size(); i++) {
*(double*)(&vecData->data()[8 * i + offset]) = tensor.data()[i];
}
}
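Since the FP16 path is lossy, round-trips only hold to a tolerance, which is why nndata_tensor_test.py compares with atol=0.002 rather than exact equality:

    import numpy as np

    a = np.random.rand(3, 3).astype(np.float32)          # values in [0, 1)
    roundtrip = a.astype(np.float16).astype(np.float32)  # analogue of the fp32 -> fp16 storage conversion
    assert np.allclose(roundtrip, a, atol=0.002)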

// Add entry in tensors
TensorInfo info;
info.name = name;
info.offset = static_cast<unsigned int>(offset);
info.dataType = dataType;
info.numDimensions = tensor.dimension();
info.order = order;
for(uint32_t i = 0; i < tensor.dimension(); i++) {
@@ -374,6 +468,11 @@
tensor.data()[i] = reinterpret_cast<float_t*>(data->getData().data())[it->offset / sizeof(float_t) + i];
}
break;
case TensorInfo::DataType::FP64:
for(uint32_t i = 0; i < tensor.size(); i++) {
tensor.data()[i] = reinterpret_cast<double_t*>(data->getData().data())[it->offset / sizeof(double_t) + i];
}
break;
}
if(dequantize) {
if(it->quantization) {
@@ -518,7 +617,7 @@

return {};
}
//#endif
Review comment (Collaborator): Intentional?

Reply (Author): ;)

void serialize(std::vector<std::uint8_t>& metadata, DatatypeEnum& datatype) const override {
metadata = utility::serialize(*this);
datatype = DatatypeEnum::NNData;