diff --git a/bindings/python/src/pipeline/datatype/NNDataBindings.cpp b/bindings/python/src/pipeline/datatype/NNDataBindings.cpp
index 4d7f68be3..015f6ebe4 100644
--- a/bindings/python/src/pipeline/datatype/NNDataBindings.cpp
+++ b/bindings/python/src/pipeline/datatype/NNDataBindings.cpp
@@ -10,11 +10,14 @@
 //pybind
 #include <pybind11/pybind11.h>
 #include <pybind11/stl.h>
+#include <pybind11/numpy.h>
 
 //xtensor
 #define FORCE_IMPORT_ARRAY
 #include <xtensor-python/pyarray.hpp>
 
+#include "fp16/fp16.h"
+
 // #include "spdlog/spdlog.h"
 
 void bind_nndata(pybind11::module& m, void* pCallstack){
@@ -90,6 +93,7 @@ void bind_nndata(pybind11::module& m, void* pCallstack){
         .value("INT", TensorInfo::DataType::INT)
         .value("FP32", TensorInfo::DataType::FP32)
         .value("I8", TensorInfo::DataType::I8)
+        .value("FP64", TensorInfo::DataType::FP64)
         ;
 
     tensorInfoStorageOrder
@@ -182,26 +186,57 @@ void bind_nndata(pybind11::module& m, void* pCallstack){
         // .def("setTimestampDevice", &NNData::setTimestampDevice, DOC(dai, NNData, setTimestampDevice))
         // .def("setSequenceNum", &NNData::setSequenceNum, DOC(dai, NNData, setSequenceNum))
 
-        .def("addTensor", static_cast<NNData& (NNData::*)(const std::string&, const std::vector<int>&)>(&NNData::addTensor), py::arg("name"), py::arg("tensor"), DOC(dai, NNData, addTensor))
-        .def("addTensor", static_cast<NNData& (NNData::*)(const std::string&, const std::vector<float>&)>(&NNData::addTensor), py::arg("name"), py::arg("tensor"), DOC(dai, NNData, addTensor))
-        .def("addTensor", static_cast<NNData& (NNData::*)(const std::string&, const std::vector<double>&)>(&NNData::addTensor), py::arg("name"), py::arg("tensor"), DOC(dai, NNData, addTensor))
-        .def("addTensor", static_cast<NNData& (NNData::*)(const std::string&, const std::vector<int>&, TensorInfo::StorageOrder)>(&NNData::addTensor), py::arg("name"), py::arg("tensor"), py::arg("storageOrder"), DOC(dai, NNData, addTensor))
         .def("addTensor", static_cast<NNData& (NNData::*)(const std::string&, const std::vector<float>&, TensorInfo::StorageOrder)>(&NNData::addTensor), py::arg("name"), py::arg("tensor"), py::arg("storageOrder"), DOC(dai, NNData, addTensor))
         .def("addTensor", static_cast<NNData& (NNData::*)(const std::string&, const std::vector<double>&, TensorInfo::StorageOrder)>(&NNData::addTensor), py::arg("name"), py::arg("tensor"), py::arg("storageOrder"), DOC(dai, NNData, addTensor))
-        .def("addTensor", static_cast<NNData& (NNData::*)(const std::string&, const xt::xarray<int>&)>(&NNData::addTensor), py::arg("name"), py::arg("tensor"), DOC(dai, NNData, addTensor, 2))
-        .def("addTensor", static_cast<NNData& (NNData::*)(const std::string&, const xt::xarray<float>&)>(&NNData::addTensor), py::arg("name"), py::arg("tensor"), DOC(dai, NNData, addTensor, 2))
-        .def("addTensor", static_cast<NNData& (NNData::*)(const std::string&, const xt::xarray<double>&)>(&NNData::addTensor), py::arg("name"), py::arg("tensor"), DOC(dai, NNData, addTensor, 2))
-        .def("addTensor", static_cast<NNData& (NNData::*)(const std::string&, const xt::xarray<int>&, TensorInfo::StorageOrder)>(&NNData::addTensor), py::arg("name"), py::arg("tensor"), py::arg("storageOrder"), DOC(dai, NNData, addTensor, 2))
         .def("addTensor", static_cast<NNData& (NNData::*)(const std::string&, const xt::xarray<float>&, TensorInfo::StorageOrder)>(&NNData::addTensor), py::arg("name"), py::arg("tensor"), py::arg("storageOrder"), DOC(dai, NNData, addTensor, 2))
         .def("addTensor", static_cast<NNData& (NNData::*)(const std::string&, const xt::xarray<double>&, TensorInfo::StorageOrder)>(&NNData::addTensor), py::arg("name"), py::arg("tensor"), py::arg("storageOrder"), DOC(dai, NNData, addTensor, 2))
+
+        .def("addTensor", [](NNData& obj, const std::string& name, py::object tensor_obj, dai::TensorInfo::DataType dataType){
+            auto tensor = py::array(tensor_obj);
+            if(dataType == dai::TensorInfo::DataType::INT)
+                obj.addTensor(name, tensor.cast<xt::xarray<int>>(), dai::TensorInfo::DataType::INT);
+            else if(dataType == dai::TensorInfo::DataType::FP32)
+                obj.addTensor(name, tensor.cast<xt::xarray<float>>(), dai::TensorInfo::DataType::FP32);
+            else if(dataType == dai::TensorInfo::DataType::FP64)
+                obj.addTensor(name, tensor.cast<xt::xarray<double>>(), dai::TensorInfo::DataType::FP64);
+            else if(dataType == dai::TensorInfo::DataType::FP16)
+                obj.addTensor(name, tensor.cast<xt::xarray<float>>(), dai::TensorInfo::DataType::FP16);
+            else if(dataType == dai::TensorInfo::DataType::U8F)
+                obj.addTensor(name, tensor.cast<xt::xarray<std::uint8_t>>(), dai::TensorInfo::DataType::U8F);
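
The two lambdas above give the Python API an explicit-datatype form and a dtype-inferring form. A minimal usage sketch of both, assuming a `depthai` wheel built from this branch (tensor names and shapes here are illustrative):

```python
import numpy as np
import depthai as dai

nn = dai.NNData()
arr = np.random.rand(2, 3)  # float64

# Explicit form: the first lambda casts the array and forwards the enum value.
nn.addTensor("a", arr, dai.TensorInfo.DataType.FP64)

# Inferred form: the second lambda maps the NumPy dtype to a DataType
# (float32/float64 -> FP32, int -> INT, float16 -> FP16, int8 -> I8,
# uint8 -> U8F) and raises for anything else, e.g. string arrays.
nn.addTensor("b", arr)

assert nn.getTensorDatatype("a") == dai.TensorInfo.DataType.FP64
assert nn.getTensorDatatype("b") == dai.TensorInfo.DataType.FP32
```
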
+            else if(dataType == dai::TensorInfo::DataType::I8)
+                obj.addTensor(name, tensor.cast<xt::xarray<std::int8_t>>(), dai::TensorInfo::DataType::I8);
+            else throw std::runtime_error("Unsupported datatype");
+        }, py::arg("name"), py::arg("tensor"), py::arg("dataType"), DOC(dai, NNData, addTensor))
+
+        .def("addTensor", [](NNData& obj, const std::string& name, py::object tensor_obj){
+            auto tensor = py::array(tensor_obj);
+            auto dtype = tensor.dtype();
+            if(dtype.is(py::dtype::of<float>()) || dtype.is(py::dtype::of<double>())) {
+                obj.addTensor(name, tensor.cast<xt::xarray<float>>(), dai::TensorInfo::DataType::FP32);
+            } else if(dtype.is(py::dtype::of<int>()) || dtype.is(py::dtype::of<std::int64_t>())) {
+                obj.addTensor(name, tensor.cast<xt::xarray<int>>(), dai::TensorInfo::DataType::INT);
+            } else if(dtype.is(py::dtype("float16"))) {
+                obj.addTensor(name, tensor.cast<xt::xarray<float>>(), dai::TensorInfo::DataType::FP16);
+            } else if(dtype.is(py::dtype::of<std::int8_t>())) {
+                obj.addTensor(name, tensor.cast<xt::xarray<std::int8_t>>(), dai::TensorInfo::DataType::I8);
+            } else if(dtype.is(py::dtype::of<std::uint8_t>())) {
+                obj.addTensor(name, tensor.cast<xt::xarray<std::uint8_t>>(), dai::TensorInfo::DataType::U8F);
+            } else throw std::runtime_error("Unsupported object type");
+        }, py::arg("name"), py::arg("tensor"), DOC(dai, NNData, addTensor, 2))
+
         .def("getTensor", [](NNData& obj, const std::string& name, bool dequantize) -> py::object {
             const auto datatype = obj.getTensorDatatype(name);
-            if(datatype == dai::TensorInfo::DataType::U8F && !dequantize) {
+            if((datatype == dai::TensorInfo::DataType::U8F && !dequantize) ||
+               (datatype == dai::TensorInfo::DataType::I8 && !dequantize) ||
+               (datatype == dai::TensorInfo::DataType::INT && !dequantize)) {
                 // In case of dequantization, we should always return float
                 return py::cast(obj.getTensor<int>(name));
+            } else if(datatype == dai::TensorInfo::DataType::FP64) {
+                return py::cast(obj.getTensor<double>(name, dequantize));
             } else {
                 return py::cast(obj.getTensor<float>(name, dequantize));
             }
diff --git a/bindings/python/tests/nndata_tensor_test.py b/bindings/python/tests/nndata_tensor_test.py
index 1827de110..46d3d196e 100644
--- a/bindings/python/tests/nndata_tensor_test.py
+++ b/bindings/python/tests/nndata_tensor_test.py
@@ -4,28 +4,36 @@
 def test_nndata_tensor():
     nndata = dai.NNData()
-    
+
     tensorA = np.random.rand(3,3,3,3)
     tensorB = np.random.randint(2**8, size=(3,3,3,3))
-    tensorC = [1,2,3,4,5]
+    tensorC = [[1,5],[1,1],[2,3],[1,2]]
     tensorD = [1.1, 2.1, 3.1, 4.1, 5.1]
+    tensorE = ["string", "string2"]
 
-    nndata.addTensor("a", tensorA)
+    nndata.addTensor("a", tensorA.astype(np.float16))
     nndata.addTensor("b", tensorB)
     nndata.addTensor("c", tensorC)
-    nndata.addTensor("d", tensorD)
+    nndata.addTensor("d", np.array(tensorD).astype(np.float32))
+    nndata.addTensor("dd", tensorD)
+    #nndata.addTensor("e", tensorE) # This should fail
+    nndata.addTensor("f", tensorB.astype(np.float16))
+    nndata.addTensor("g", tensorA)
 
     assert(nndata.getTensorDatatype("a") == dai.TensorInfo.DataType.FP16)
-    assert(nndata.getTensorDatatype("b") == dai.TensorInfo.DataType.U8F)
-    assert(nndata.getTensorDatatype("c") == dai.TensorInfo.DataType.U8F)
-    assert(nndata.getTensorDatatype("d") == dai.TensorInfo.DataType.FP16)
+    assert(nndata.getTensorDatatype("b") == dai.TensorInfo.DataType.INT)
+    assert(nndata.getTensorDatatype("c") == dai.TensorInfo.DataType.INT)
+    assert(nndata.getTensorDatatype("d") == dai.TensorInfo.DataType.FP32)
+    assert(nndata.getTensorDatatype("dd") == dai.TensorInfo.DataType.FP32)
+    assert(nndata.getTensorDatatype("f") == dai.TensorInfo.DataType.FP16)
+    assert(nndata.getTensorDatatype("g") == dai.TensorInfo.DataType.FP32)
 
     assert(np.allclose(nndata.getTensor("a"), tensorA, atol=0.002))
     assert((nndata.getTensor("b") == tensorB).all())
     assert((nndata.getTensor("c") == tensorC).all())
     assert(np.allclose(nndata.getTensor("d"), tensorD, atol=0.002))
-    assert(np.allclose(nndata.getFirstTensor(), tensorA, atol=0.002))
+    assert(np.allclose(nndata.getFirstTensor(), tensorA, atol=0.002))
 
 if __name__ == '__main__':
     test_nndata_tensor()
\ No newline at end of file
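
One behavior the test pins down is worth calling out: the inferred path stores NumPy `float64` input as FP32 (see the `"g"` assertion), so double precision survives only when the data type is passed explicitly. Likewise, `getTensor` returns integer arrays for `U8F`/`I8`/`INT` tensors and only converts to float when dequantization is requested (the single-argument calls in the test imply `dequantize` defaults to false). A short sketch, written as a hypothetical continuation of the test above:

```python
nndata.addTensor("g64", tensorA, dai.TensorInfo.DataType.FP64)  # keeps float64
assert nndata.getTensorDatatype("g64") == dai.TensorInfo.DataType.FP64

ints = nndata.getTensor("b")          # INT tensor -> integer array
floats = nndata.getTensor("b", True)  # dequantize -> float array
assert ints.dtype.kind == "i" and floats.dtype.kind == "f"
```
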
diff --git a/include/depthai/common/TensorInfo.hpp b/include/depthai/common/TensorInfo.hpp
index 0bc0f77bd..ec2bb6bfc 100644
--- a/include/depthai/common/TensorInfo.hpp
+++ b/include/depthai/common/TensorInfo.hpp
@@ -34,6 +34,7 @@ struct TensorInfo {
         INT = 2,     // Signed integer (4 byte)
         FP32 = 3,    // Single precision floating point
         I8 = 4,      // Signed byte
+        FP64 = 5,    // Double precision floating point
     };
 
     void validateStorageOrder() {
@@ -83,6 +84,8 @@ struct TensorInfo {
             case DataType::INT:
             case DataType::FP32:
                 return sizeof(float);
+            case DataType::FP64:
+                return sizeof(double);
             default:
                 return 0;
                 break;
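
For quick reference, the per-element sizes that `getDataTypeSize()` resolves to, mirrored in Python; the FP16, U8F and I8 cases fall outside this hunk, so those three values are assumptions based on the enum comments:

```python
# Bytes per element for each TensorInfo::DataType.
DATA_TYPE_SIZE = {
    "FP16": 2,  # half precision (assumed, not shown in this hunk)
    "U8F":  1,  # unsigned byte (assumed)
    "INT":  4,  # signed integer (4 byte)
    "FP32": 4,  # single precision floating point
    "I8":   1,  # signed byte (assumed)
    "FP64": 8,  # double precision floating point (added in this diff)
}
```
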
diff --git a/include/depthai/pipeline/datatype/NNData.hpp b/include/depthai/pipeline/datatype/NNData.hpp
index 4a7aecfed..d45d205ac 100644
--- a/include/depthai/pipeline/datatype/NNData.hpp
+++ b/include/depthai/pipeline/datatype/NNData.hpp
@@ -208,8 +208,85 @@ class NNData : public Buffer {
      * @return NNData&: reference to this object
      */
     template <typename _Ty>
-    NNData& addTensor(const std::string& name, const std::vector<_Ty>& data) {
-        return addTensor<_Ty>(name, xt::adapt(data, std::vector<size_t>{1, data.size()}));
+    NNData& addTensor(const std::string& name, const std::vector<_Ty>& data, dai::TensorInfo::DataType dataType) {
+        return addTensor<_Ty>(name, xt::adapt(data, std::vector<size_t>{1, data.size()}), dataType);
     };
+    // addTensor vector dispatch
+    template <typename _Ty>
+    NNData& addTensor(const std::string& name, const std::vector<_Ty>& tensor) {
+        if constexpr(std::is_same<_Ty, int>::value) {
+            return addTensor(name, tensor, dai::TensorInfo::DataType::INT);
+        } else if constexpr(std::is_same<_Ty, uint16_t>::value) {
+            return addTensor(name, tensor, dai::TensorInfo::DataType::FP16);
+        } else if constexpr(std::is_same<_Ty, float>::value) {
+            return addTensor(name, tensor, dai::TensorInfo::DataType::FP32);
+        } else if constexpr(std::is_same<_Ty, double>::value) {
+            return addTensor(name, tensor, dai::TensorInfo::DataType::FP64);
+        } else if constexpr(std::is_same<_Ty, std::int8_t>::value) {
+            return addTensor(name, tensor, dai::TensorInfo::DataType::I8);
+        } else if constexpr(std::is_same<_Ty, std::uint8_t>::value) {
+            return addTensor(name, tensor, dai::TensorInfo::DataType::U8F);
+        } else {
+            throw std::runtime_error("Unsupported datatype");
+        }
+    }
+
+    NNData& addTensor(const std::string& name, const std::vector<int>& tensor) {
+        return addTensor(name, tensor, dai::TensorInfo::DataType::INT);
+    };
+    NNData& addTensor(const std::string& name, const std::vector<uint16_t>& tensor) {
+        return addTensor(name, tensor, dai::TensorInfo::DataType::FP16);
+    };
+    NNData& addTensor(const std::string& name, const std::vector<float>& tensor) {
+        return addTensor(name, tensor, dai::TensorInfo::DataType::FP32);
+    };
+    NNData& addTensor(const std::string& name, const std::vector<double>& tensor) {
+        return addTensor(name, tensor, dai::TensorInfo::DataType::FP64);
+    };
+    NNData& addTensor(const std::string& name, const std::vector<std::int8_t>& tensor) {
+        return addTensor(name, tensor, dai::TensorInfo::DataType::I8);
+    };
+    NNData& addTensor(const std::string& name, const std::vector<std::uint8_t>& tensor) {
+        return addTensor(name, tensor, dai::TensorInfo::DataType::U8F);
+    };
+
+    // addTensor dispatch
+    template <typename _Ty>
+    NNData& addTensor(const std::string& name, const xt::xarray<_Ty>& tensor) {
+        if constexpr(std::is_same<_Ty, int>::value) {
+            return addTensor(name, tensor, dai::TensorInfo::DataType::INT);
+        } else if constexpr(std::is_same<_Ty, uint16_t>::value) {
+            return addTensor(name, tensor, dai::TensorInfo::DataType::FP16);
+        } else if constexpr(std::is_same<_Ty, float>::value) {
+            return addTensor(name, tensor, dai::TensorInfo::DataType::FP32);
+        } else if constexpr(std::is_same<_Ty, double>::value) {
+            return addTensor(name, tensor, dai::TensorInfo::DataType::FP64);
+        } else if constexpr(std::is_same<_Ty, std::int8_t>::value) {
+            return addTensor(name, tensor, dai::TensorInfo::DataType::I8);
+        } else if constexpr(std::is_same<_Ty, std::uint8_t>::value) {
+            return addTensor(name, tensor, dai::TensorInfo::DataType::U8F);
+        } else {
+            throw std::runtime_error("Unsupported datatype");
+        }
+    }
+
+    NNData& addTensor(const std::string& name, const xt::xarray<int>& tensor) {
+        return addTensor(name, tensor, dai::TensorInfo::DataType::INT);
+    };
+    NNData& addTensor(const std::string& name, const xt::xarray<uint16_t>& tensor) {
+        return addTensor(name, tensor, dai::TensorInfo::DataType::FP16);
+    };
+    NNData& addTensor(const std::string& name, const xt::xarray<float>& tensor) {
+        return addTensor(name, tensor, dai::TensorInfo::DataType::FP32);
+    };
+    NNData& addTensor(const std::string& name, const xt::xarray<double>& tensor) {
+        return addTensor(name, tensor, dai::TensorInfo::DataType::FP64);
+    };
+    NNData& addTensor(const std::string& name, const xt::xarray<std::int8_t>& tensor) {
+        return addTensor(name, tensor, dai::TensorInfo::DataType::I8);
+    };
+    NNData& addTensor(const std::string& name, const xt::xarray<std::uint8_t>& tensor) {
+        return addTensor(name, tensor, dai::TensorInfo::DataType::U8F);
+    };
 
     /**
@@ -226,6 +303,22 @@ class NNData : public Buffer {
         return addTensor<_Ty>(name, xt::adapt(data, std::vector<size_t>{1, data.size()}), order);
     };
 
+    /**
+     * @brief Add a tensor to this NNData object.
+     * Implicitly adds a TensorInfo::DataType
+     *
+     * @param name: Name of the tensor
+     * @param data: array
+     * @param order: Storage order of the tensor
+     * @return NNData&: reference to this object
+     */
+    template <typename _Ty>
+    NNData& addTensor(const std::string& name, const xt::xarray<_Ty>& data, TensorInfo::StorageOrder order) {
+        auto dataType = std::is_integral<_Ty>::value ? dai::TensorInfo::DataType::U8F : dai::TensorInfo::DataType::FP16;
+        return addTensor<_Ty>(name, data, dataType, order);
+    };
+
     /**
      * @brief Add a tensor to this NNData object. The storage order is picked based on the number of dimensions of the tensor.
      * Float values are converted to FP16 and integers are cast to bytes.
@@ -235,7 +328,7 @@ class NNData : public Buffer {
      * @return NNData&: reference to this object
      */
     template <typename _Ty>
-    NNData& addTensor(const std::string& name, const xt::xarray<_Ty>& tensor) {
+    NNData& addTensor(const std::string& name, const xt::xarray<_Ty>& tensor, dai::TensorInfo::DataType dataType) {
         TensorInfo::StorageOrder order;
         switch(tensor.shape().size()) {
             case 1:
@@ -253,7 +346,7 @@ class NNData : public Buffer {
             default:
                 throw std::runtime_error("Unsupported tensor shape. Only 1D, 2D, 3D and 4D tensors are supported");
         }
-        return addTensor(name, tensor, order);
+        return addTensor(name, tensor, dataType, order);
     }
 
@@ -266,8 +359,15 @@ class NNData : public Buffer {
      * @return NNData&: reference to this object
      */
     template <typename _Ty>
-    NNData& addTensor(const std::string& name, const xt::xarray<_Ty>& tensor, const TensorInfo::StorageOrder order) {
+    NNData& addTensor(const std::string& name, const xt::xarray<_Ty>& tensor, dai::TensorInfo::DataType dataType, const TensorInfo::StorageOrder order) {
         static_assert(std::is_integral<_Ty>::value || std::is_floating_point<_Ty>::value, "Tensor type needs to be integral or floating point");
+        // if(dataType==dai::TensorInfo::DataType::FP32) std::cout<<"FP32\n";
+        // else if(dataType==dai::TensorInfo::DataType::FP16) std::cout<<"FP16\n";
+        // else if(dataType==dai::TensorInfo::DataType::INT) std::cout<<"INT\n";
+        // else if(dataType==dai::TensorInfo::DataType::I8) std::cout<<"I8\n";
+        // else if(dataType==dai::TensorInfo::DataType::U8F) std::cout<<"U8F\n";
+        // else if(dataType==dai::TensorInfo::DataType::FP64) std::cout<<"FP64\n";
+        // else std::cout<<"Unsupported type\n";
 
         // Check if data is vector type of data
         if(std::dynamic_pointer_cast<VectorMemory>(data) == nullptr) {
@@ -277,7 +377,23 @@ class NNData : public Buffer {
         auto vecData = std::dynamic_pointer_cast<VectorMemory>(data);
 
         // Get size in bytes of the converted tensor data, u8 for integral and fp16 for floating point
-        const size_t sConvertedData = std::is_integral<_Ty>::value ? tensor.size() : 2 * tensor.size();
+        // const size_t sConvertedData = std::is_integral<_Ty>::value ? tensor.size() : 2 * tensor.size();
+        size_t sConvertedData = tensor.size();
+        switch(dataType) {
+            case dai::TensorInfo::DataType::FP64:
+                sConvertedData *= 8;
+                break;
+            case dai::TensorInfo::DataType::FP32:
+            case dai::TensorInfo::DataType::INT:
+                sConvertedData *= 4;
+                break;
+            case dai::TensorInfo::DataType::FP16:
+                sConvertedData *= 2;
+                break;
+            case dai::TensorInfo::DataType::U8F:
+            case dai::TensorInfo::DataType::I8:
+                break;
+        }
 
         // Append bytes so that each new tensor is DATA_ALIGNMENT aligned
         size_t remainder = std::distance(vecData->begin(), vecData->end()) % DATA_ALIGNMENT;
@@ -291,22 +407,38 @@ class NNData : public Buffer {
         // Reserve space
         vecData->resize(offset + sConvertedData);
 
-        // Convert data to u8 or fp16 and write to data
-        if(std::is_integral<_Ty>::value) {
+        // Convert data to appropriate data type and write to data
+        if(dataType == dai::TensorInfo::DataType::I8) {
             for(uint32_t i = 0; i < tensor.size(); i++) {
-                vecData->data()[i + offset] = (uint8_t)tensor.data()[i];
+                vecData->data()[i + offset] = (int8_t)tensor.data()[i];
             }
-        } else {
+        } else if(dataType == dai::TensorInfo::DataType::FP16) {
             for(uint32_t i = 0; i < tensor.size(); i++) {
                 *(uint16_t*)(&vecData->data()[2 * i + offset]) = fp32_to_fp16(tensor.data()[i]);
             }
+        } else if(dataType == dai::TensorInfo::DataType::FP32) {
+            for(uint32_t i = 0; i < tensor.size(); i++) {
+                *(float*)(&vecData->data()[4 * i + offset]) = tensor.data()[i];
+            }
+        } else if(dataType == dai::TensorInfo::DataType::INT) {
+            for(uint32_t i = 0; i < tensor.size(); i++) {
+                *(int32_t*)(&vecData->data()[4 * i + offset]) = tensor.data()[i];
+            }
+        } else if(dataType == dai::TensorInfo::DataType::U8F) {
+            for(uint32_t i = 0; i < tensor.size(); i++) {
+                vecData->data()[i + offset] = (uint8_t)tensor.data()[i];
+            }
+        } else if(dataType == dai::TensorInfo::DataType::FP64) {
+            for(uint32_t i = 0; i < tensor.size(); i++) {
+                *(double*)(&vecData->data()[8 * i + offset]) = tensor.data()[i];
+            }
         }
 
         // Add entry in tensors
         TensorInfo info;
         info.name = name;
         info.offset = static_cast<uint32_t>(offset);
-        info.dataType = std::is_integral<_Ty>::value ? TensorInfo::DataType::U8F : TensorInfo::DataType::FP16;
+        info.dataType = dataType;
         info.numDimensions = tensor.dimension();
         info.order = order;
         for(uint32_t i = 0; i < tensor.dimension(); i++) {
@@ -374,6 +506,11 @@ class NNData : public Buffer {
                     tensor.data()[i] = reinterpret_cast<float_t*>(data->getData().data())[it->offset / sizeof(float_t) + i];
                 }
                 break;
+            case TensorInfo::DataType::FP64:
+                for(uint32_t i = 0; i < tensor.size(); i++) {
+                    tensor.data()[i] = reinterpret_cast<double_t*>(data->getData().data())[it->offset / sizeof(double_t) + i];
+                }
+                break;
         }
         if(dequantize) {
             if(it->quantization) {
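
The serialization path above appends each tensor at a `DATA_ALIGNMENT`-aligned offset and then writes `tensor.size()` elements at the element width selected by the `sConvertedData` switch. A sketch of that offset arithmetic; the value of `DATA_ALIGNMENT` is not part of this diff, so 64 is an assumption:

```python
DATA_ALIGNMENT = 64  # assumed; the real constant comes from the C++ headers

def aligned_offset(current_bytes: int) -> int:
    """Offset where the next tensor's payload starts, as in addTensor() above."""
    remainder = current_bytes % DATA_ALIGNMENT
    return current_bytes if remainder == 0 else current_bytes + DATA_ALIGNMENT - remainder

# A 3x3 FP64 tensor appended after 100 bytes of existing payload:
offset = aligned_offset(100)  # -> 128
n_bytes = 3 * 3 * 8           # 9 elements * 8 bytes (FP64) -> 72 bytes written
```
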
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index e421b6546..fa0a71476 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -6,6 +6,13 @@ enable_testing()
 hunter_add_package(Catch2)
 find_package(Catch2 CONFIG REQUIRED)
 
+# Print details of the Catch2 package
+message(STATUS "Catch2_FOUND: ${Catch2_FOUND}")
+message(STATUS "Catch2_VERSION: ${Catch2_VERSION}")
+message(STATUS "Catch2_INCLUDE_DIRS: ${Catch2_INCLUDE_DIRS}")
+message(STATUS "Catch2_LIBRARIES: ${Catch2_LIBRARIES}")
+
+
 # Create environments for usb & poe labels
 set(test_usb_env
     # Misc
diff --git a/tests/src/onhost_tests/pipeline/datatype/nndata_test.cpp b/tests/src/onhost_tests/pipeline/datatype/nndata_test.cpp
index 953fe9b7e..768996394 100644
--- a/tests/src/onhost_tests/pipeline/datatype/nndata_test.cpp
+++ b/tests/src/onhost_tests/pipeline/datatype/nndata_test.cpp
@@ -95,4 +95,38 @@ TEST_CASE("NNData double storage conversions") {
     REQUIRE_THROWS_AS(nndata.getTensor<double>("c1", dai::TensorInfo::StorageOrder::W), std::runtime_error);
 
     REQUIRE(nndata.getTensor<double>("c1", dai::TensorInfo::StorageOrder::NC) == tensorC1);
+}
+
+TEST_CASE("addTensor overloads") {
+    dai::NNData nndata;
+    xt::xarray<int> tensorINT = {{1, 2, 1}, {4, -5, 9}};
+    xt::xarray<uint16_t> tensorUINT16 = {{1, 2, 1}, {4, 5, 9}};
+    xt::xarray<float> tensorFLOAT = {{1.1f, 2, 1.3f}, {4.4f, 5.5f, 9.6f}};
+    xt::xarray<double> tensorDOUBLE = {{1, 2, 1.1}, {4, 5.2, 9.3}};
+    xt::xarray<int8_t> tensorINT8 = {{1, 2, -1}, {4, 5, 9}};
+    xt::xarray<uint8_t> tensorUINT8 = {{1, 2, 1}, {4, 5, 9}};
+
+    std::vector<int> vectorINT = {1, 2, 1, 4, -5, 9};
+    std::vector<uint16_t> vectorUINT16 = {1, 2, 1, 4, 5, 9};
+    std::vector<float> vectorFLOAT = {1.1f, 2, 1.3f, 4.4f, 5.5f, 9.6f};
+    std::vector<double> vectorDOUBLE = {1, 2, 1.1, 4, 5.2, 9.3};
+    std::vector<int8_t> vectorINT8 = {1, 2, -1, 4, 5, 9};
+    std::vector<uint8_t> vectorUINT8 = {1, 2, 1, 4, 5, 9};
+
+    REQUIRE_NOTHROW(nndata.addTensor("INT", tensorINT));
+    REQUIRE_NOTHROW(nndata.addTensor("UINT16", tensorUINT16));
+    REQUIRE_NOTHROW(nndata.addTensor("FLOAT", tensorFLOAT));
+    REQUIRE_NOTHROW(nndata.addTensor("DOUBLE", tensorDOUBLE));
+    REQUIRE_NOTHROW(nndata.addTensor("INT8", tensorINT8));
+    REQUIRE_NOTHROW(nndata.addTensor("UINT8", tensorUINT8));
+
+    REQUIRE_NOTHROW(nndata.addTensor("VINT", vectorINT));
+    REQUIRE_NOTHROW(nndata.addTensor("VUINT16", vectorUINT16));
+    REQUIRE_NOTHROW(nndata.addTensor("VFLOAT", vectorFLOAT));
+    REQUIRE_NOTHROW(nndata.addTensor("VDOUBLE", vectorDOUBLE));
+    REQUIRE_NOTHROW(nndata.addTensor("VINT8", vectorINT8));
+    REQUIRE_NOTHROW(nndata.addTensor("VUINT8", vectorUINT8));
+
+    std::vector<std::string> thr = {"abc", "asdf", "jkl;"};
+    REQUIRE_THROWS(nndata.addTensor("STR", thr));
 }
\ No newline at end of file