From 797ba87c2a39b3247e94ea6dc5d3b1a0b2076a9d Mon Sep 17 00:00:00 2001
From: Alejandro Saucedo
Date: Tue, 16 Nov 2021 23:03:07 +0000
Subject: [PATCH] Updated formatting

Signed-off-by: Alejandro Saucedo
---
 src/Algorithm.cpp                   | 19 ++++---
 src/Manager.cpp                     |  8 +--
 src/OpAlgoDispatch.cpp              |  6 +-
 src/include/kompute/Algorithm.hpp   | 87 +++++++++++++++++------------
 src/include/kompute/Manager.hpp     | 20 ++++---
 test/TestAsyncOperations.cpp        | 15 +++--
 test/TestLogisticRegression.cpp     |  4 +-
 test/TestManager.cpp                |  3 +-
 test/TestMultipleAlgoExecutions.cpp | 24 +++----
 test/TestPushConstant.cpp           | 83 +++++++++++++++++----------
 test/TestSpecializationConstant.cpp |  1 -
 test/TestTensor.cpp                 |  3 +-
 12 files changed, 161 insertions(+), 112 deletions(-)

diff --git a/src/Algorithm.cpp b/src/Algorithm.cpp
index 6caccf9b..9f3fe31b 100644
--- a/src/Algorithm.cpp
+++ b/src/Algorithm.cpp
@@ -23,8 +23,8 @@ Algorithm::isInit()
 void
 Algorithm::destroy()
 {
-    // We don't have to free memory on destroy as it's freed by the commandBuffer destructor
-    // if (this->mPushConstantsData) {
+    // No need to free memory on destroy; the commandBuffer destructor does it
+    // if (this->mPushConstantsData) {
     //     free(this->mPushConstantsData);
     // }
     // if (this->mSpecializationConstantsData) {
@@ -238,7 +238,8 @@ Algorithm::createPipeline()
     if (this->mPushConstantsSize) {
         pushConstantRange.setStageFlags(vk::ShaderStageFlagBits::eCompute);
         pushConstantRange.setOffset(0);
-        pushConstantRange.setSize(this->mPushConstantsDataTypeMemorySize * this->mPushConstantsSize);
+        pushConstantRange.setSize(this->mPushConstantsDataTypeMemorySize *
+                                  this->mPushConstantsSize);
 
         pipelineLayoutInfo.setPushConstantRangeCount(1);
         pipelineLayoutInfo.setPPushConstantRanges(&pushConstantRange);
@@ -254,7 +255,8 @@ Algorithm::createPipeline()
         for (uint32_t i = 0; i < this->mSpecializationConstantsSize; i++) {
             vk::SpecializationMapEntry specializationEntry(
               static_cast<uint32_t>(i),
-              static_cast<uint32_t>(this->mSpecializationConstantsDataTypeMemorySize * i),
+              static_cast<uint32_t>(
+                this->mSpecializationConstantsDataTypeMemorySize * i),
               this->mSpecializationConstantsDataTypeMemorySize);
 
             specializationEntries.push_back(specializationEntry);
@@ -265,7 +267,8 @@ Algorithm::createPipeline()
     vk::SpecializationInfo specializationInfo(
       static_cast<uint32_t>(specializationEntries.size()),
       specializationEntries.data(),
-      this->mSpecializationConstantsDataTypeMemorySize * this->mSpecializationConstantsSize,
+      this->mSpecializationConstantsDataTypeMemorySize *
+        this->mSpecializationConstantsSize,
       this->mSpecializationConstantsData);
 
     vk::PipelineShaderStageCreateInfo shaderStage(
@@ -339,12 +342,14 @@ Algorithm::recordBindPush(const vk::CommandBuffer& commandBuffer)
 {
     if (this->mPushConstantsSize) {
         KP_LOG_DEBUG("Kompute Algorithm binding push constants memory size: {}",
-                     this->mPushConstantsSize * this->mPushConstantsDataTypeMemorySize);
+                     this->mPushConstantsSize *
+                       this->mPushConstantsDataTypeMemorySize);
 
         commandBuffer.pushConstants(*this->mPipelineLayout,
                                     vk::ShaderStageFlagBits::eCompute,
                                     0,
-                                    this->mPushConstantsSize * this->mPushConstantsDataTypeMemorySize,
+                                    this->mPushConstantsSize *
+                                      this->mPushConstantsDataTypeMemorySize,
                                     this->mPushConstantsData);
     }
 }
diff --git a/src/Manager.cpp b/src/Manager.cpp
index d87a712a..0ce3df62 100644
--- a/src/Manager.cpp
+++ b/src/Manager.cpp
@@ -288,22 +288,22 @@ Manager::createDevice(const std::vector<uint32_t>& familyQueueIndices,
     }
 
     this->mFreeDevice = true;
-    
+
     // Getting an integer that says how many vulkan devices we have
     uint32_t deviceCount = 0;
     this->mInstance->enumeratePhysicalDevices(&deviceCount, nullptr);
-    
+
     // This means there are no devices at all
     if (deviceCount == 0) {
         throw std::runtime_error("Failed to find GPUs with Vulkan support! "
                                  "Maybe you haven't installed vulkan drivers?");
     }
-    
+
     // This means that we're exceeding our device limit, for
     // example if we have 2 devices, just physicalDeviceIndex
     // 0 and 1 are acceptable. Hence, physicalDeviceIndex should
     // always be less than deviceCount, else we raise an error
-    if ( !(deviceCount > physicalDeviceIndex) ) {
+    if (!(deviceCount > physicalDeviceIndex)) {
         throw std::runtime_error("There is no such physical index or device, "
                                  "please use your existing device");
     }
diff --git a/src/OpAlgoDispatch.cpp b/src/OpAlgoDispatch.cpp
index 88d6e55f..057ee34d 100644
--- a/src/OpAlgoDispatch.cpp
+++ b/src/OpAlgoDispatch.cpp
@@ -33,9 +33,9 @@ OpAlgoDispatch::record(const vk::CommandBuffer& commandBuffer)
 
     if (this->mPushConstantsSize) {
         this->mAlgorithm->setPushConstants(
-            this->mPushConstantsData,
-            this->mPushConstantsSize,
-            this->mPushConstantsDataTypeMemorySize);
+          this->mPushConstantsData,
+          this->mPushConstantsSize,
+          this->mPushConstantsDataTypeMemorySize);
     }
 
     this->mAlgorithm->recordBindCore(commandBuffer);
diff --git a/src/include/kompute/Algorithm.hpp b/src/include/kompute/Algorithm.hpp
index be17a2d0..bb6b9faa 100644
--- a/src/include/kompute/Algorithm.hpp
+++ b/src/include/kompute/Algorithm.hpp
@@ -24,12 +24,13 @@ class Algorithm
      * @param spirv (optional) The spirv code to use to create the algorithm
      * @param workgroup (optional) The kp::Workgroup to use for the dispatch
      * which defaults to kp::Workgroup(tensor[0].size(), 1, 1) if not set.
-     * @param specializationConstants (optional) The templatable param is to be used to
-     * initialize the specialization constants which cannot be changed once set.
-     * @param pushConstants (optional) This templatable param is to be used when
-     * initializing the pipeline, which set the size of the push constants -
-     * these can be modified but all new values must have the same data type and length
-     * as otherwise it will result in errors.
+     * @param specializationConstants (optional) The templatable param is to be
+     * used to initialize the specialization constants which cannot be changed
+     * once set.
+     * @param pushConstants (optional) This templatable param is to be used
+     * when initializing the pipeline, which sets the size of the push
+     * constants - these can be modified but all new values must have the same
+     * data type and length as otherwise it will result in errors.
      */
    template<typename S = float, typename P = float>
    Algorithm(std::shared_ptr<vk::Device> device,
@@ -44,15 +45,20 @@ class Algorithm
         this->mDevice = device;
 
         if (tensors.size() && spirv.size()) {
-            KP_LOG_INFO("Kompute Algorithm initialising with tensor size: {} and "
-                        "spirv size: {}",
-                        tensors.size(),
-                        spirv.size());
-            this->rebuild(
-              tensors, spirv, workgroup, specializationConstants, pushConstants);
+            KP_LOG_INFO(
+              "Kompute Algorithm initialising with tensor size: {} and "
+              "spirv size: {}",
+              tensors.size(),
+              spirv.size());
+            this->rebuild(tensors,
+                          spirv,
+                          workgroup,
+                          specializationConstants,
+                          pushConstants);
         } else {
-            KP_LOG_INFO("Kompute Algorithm constructor with empty tensors and or "
-                        "spirv so not rebuilding vulkan components");
+            KP_LOG_INFO(
+              "Kompute Algorithm constructor with empty tensors and/or "
+              "spirv so not rebuilding vulkan components");
         }
     }
 
     /**
@@ -64,8 +70,9 @@ class Algorithm
      * @param spirv The spirv code to use to create the algorithm
      * @param workgroup (optional) The kp::Workgroup to use for the dispatch
      * which defaults to kp::Workgroup(tensor[0].size(), 1, 1) if not set.
-     * @param specializationConstants (optional) The std::vector to use to
-     * initialize the specialization constants which cannot be changed once set.
+     * @param specializationConstants (optional) The std::vector to use
+     * to initialize the specialization constants which cannot be changed once
+     * set.
      * @param pushConstants (optional) The std::vector to use when
     * initializing the pipeline, which sets the size of the push constants -
     * these can be modified but all new values must have the same vector size
@@ -87,11 +94,14 @@ class Algorithm
             if (this->mSpecializationConstantsData) {
                 free(this->mSpecializationConstantsData);
             }
-            uint32_t memorySize = sizeof(decltype(specializationConstants.back()));
+            uint32_t memorySize =
+              sizeof(decltype(specializationConstants.back()));
             uint32_t size = specializationConstants.size();
             uint32_t totalSize = size * memorySize;
             this->mSpecializationConstantsData = malloc(totalSize);
-            memcpy(this->mSpecializationConstantsData, specializationConstants.data(), totalSize);
+            memcpy(this->mSpecializationConstantsData,
+                   specializationConstants.data(),
+                   totalSize);
             this->mSpecializationConstantsDataTypeMemorySize = memorySize;
             this->mSpecializationConstantsSize = size;
         }
@@ -109,11 +119,11 @@ class Algorithm
             this->mPushConstantsSize = size;
         }
 
-        this->setWorkgroup(workgroup,
-                           this->mTensors.size() ? this->mTensors[0]->size() : 1);
+        this->setWorkgroup(
+          workgroup, this->mTensors.size() ? this->mTensors[0]->size() : 1);
 
-        // Descriptor pool is created first so if available then destroy all before
-        // rebuild
+        // Descriptor pool is created first so if available then destroy all
+        // before rebuild
         if (this->isInit()) {
             this->destroy();
         }
@@ -176,9 +186,9 @@ class Algorithm
      * Sets the push constants to the new value provided to use in the next
      * bindPush()
      *
-     * @param pushConstants The templatable vector is to be used to set the push constants to use in the
-     * next bindPush(...) calls. The constants provided must be of the same size
-     * as the ones created during initialization.
+     * @param pushConstants The templatable vector is to be used to set the push
+     * constants to use in the next bindPush(...) calls. The constants provided
+     * must be of the same size as the ones created during initialization.
      */
    template<typename T>
    void setPushConstants(const std::vector<T>& pushConstants)
    {
@@ -193,21 +203,24 @@ class Algorithm
      * Sets the push constants to the new value provided to use in the next
      * bindPush() with the raw memory block location and memory size to be used.
      *
-     * @param data The raw data point to copy the data from, without modifying the pointer.
+     * @param data The raw data pointer to copy the data from, without
+     * modifying the pointer.
      * @param size The number of data elements provided in the data
      * @param memorySize The memory size of each of the data elements in bytes.
      */
-    void setPushConstants(void* data, uint32_t size, uint32_t memorySize) {
+    void setPushConstants(void* data, uint32_t size, uint32_t memorySize)
+    {
         uint32_t totalSize = memorySize * size;
-        uint32_t previousTotalSize = this->mPushConstantsDataTypeMemorySize * this->mPushConstantsSize;
+        uint32_t previousTotalSize =
+          this->mPushConstantsDataTypeMemorySize * this->mPushConstantsSize;
         if (totalSize != previousTotalSize) {
-            throw std::runtime_error(
-              fmt::format("Kompute Algorithm push "
-                          "constant total memory size provided is {} but expected {} bytes",
-                          totalSize,
-                          previousTotalSize));
+            throw std::runtime_error(fmt::format(
+              "Kompute Algorithm push "
+              "constant total memory size provided is {} but expected {} bytes",
+              totalSize,
+              previousTotalSize));
         }
         if (this->mPushConstantsData) {
             free(this->mPushConstantsData);
@@ -230,13 +243,15 @@ class Algorithm
     /**
      * Gets the specialization constants of the current algorithm.
      *
-     * @returns The std::vector currently set for specialization constants
+     * @returns The std::vector currently set for specialization
+     * constants
      */
     template<typename T>
     const std::vector<T> getSpecializationConstants()
     {
         return { (T*)this->mSpecializationConstantsData,
-                 ((T*)this->mSpecializationConstantsData) + this->mSpecializationConstantsSize };
+                 ((T*)this->mSpecializationConstantsData) +
+                   this->mSpecializationConstantsSize };
     }
     /**
      * Gets the push constants of the current algorithm.
     *
@@ -247,7 +262,7 @@ class Algorithm
     const std::vector<T> getPushConstants()
     {
         return { (T*)this->mPushConstantsData,
-                ((T*)this->mPushConstantsData) + this->mPushConstantsSize };
+                 ((T*)this->mPushConstantsData) + this->mPushConstantsSize };
     }
     /**
      * Gets the current tensors that are used in the algorithm.
diff --git a/src/include/kompute/Manager.hpp b/src/include/kompute/Manager.hpp
index d9b850ef..5ff32522 100644
--- a/src/include/kompute/Manager.hpp
+++ b/src/include/kompute/Manager.hpp
@@ -124,8 +124,9 @@ class Manager
     }
 
     /**
-     * Default non-template function that can be used to create algorithm objects
-     * which provides default types to the push and spec constants as floats.
+     * Default non-template function that can be used to create algorithm
+     * objects which provides default types to the push and spec constants as
+     * floats.
      *
      * @param tensors (optional) The tensors to initialise the algorithm with
      * @param spirv (optional) The SPIRV bytes for the algorithm to dispatch
@@ -144,7 +145,8 @@ class Manager
       const std::vector<float>& specializationConstants = {},
       const std::vector<float>& pushConstants = {})
     {
-        return this->algorithm<>(tensors, spirv, workgroup, specializationConstants, pushConstants);
+        return this->algorithm<>(
+          tensors, spirv, workgroup, specializationConstants, pushConstants);
     }
 
     /**
@@ -155,10 +157,10 @@ class Manager
      * @param spirv (optional) The SPIRV bytes for the algorithm to dispatch
      * @param workgroup (optional) kp::Workgroup for algorithm to use, and
      * defaults to (tensor[0].size(), 1, 1)
-     * @param specializationConstants (optional) templatable vector parameter to use for
-     * specialization constants, and defaults to an empty constant
-     * @param pushConstants (optional) templatable vector parameter to use for push constants,
-     * and defaults to an empty constant
+     * @param specializationConstants (optional) templatable vector parameter
+     * to use for specialization constants, and defaults to an empty constant
+     * @param pushConstants (optional) templatable vector parameter to use for
+     * push constants, and defaults to an empty constant
      * @returns Shared pointer with initialised algorithm
      */
    template<typename S = float, typename P = float>
@@ -200,7 +202,8 @@ class Manager
     /**
      * Information about the current device.
      *
-     * @return vk::PhysicalDeviceProperties containing information about the device
+     * @return vk::PhysicalDeviceProperties containing information about the
+     * device
      **/
     vk::PhysicalDeviceProperties getDeviceProperties() const;
 
@@ -211,7 +214,6 @@ class Manager
      **/
     std::vector<vk::PhysicalDevice> listDevices() const;
 
-
   private:
     // -------------- OPTIONALLY OWNED RESOURCES
     std::shared_ptr<vk::Instance> mInstance = nullptr;
diff --git a/test/TestAsyncOperations.cpp b/test/TestAsyncOperations.cpp
index 1ece3ec1..cacc41da 100644
--- a/test/TestAsyncOperations.cpp
+++ b/test/TestAsyncOperations.cpp
@@ -168,8 +168,10 @@ TEST(TestAsyncOperations, TestManagerAsyncExecution)
 
     // AMD Drivers in Windows may see an error in this line due to timeout.
     // In order to fix this, it requires a change on Windows registries.
-    // More details on this can be found here: https://docs.substance3d.com/spdoc/gpu-drivers-crash-with-long-computations-128745489.html
-    // Context on solution discussed in github: https://github.com/KomputeProject/kompute/issues/196#issuecomment-808866505
+    // More details on this can be found here:
+    // https://docs.substance3d.com/spdoc/gpu-drivers-crash-with-long-computations-128745489.html
+    // Context on solution discussed in github:
+    // https://github.com/KomputeProject/kompute/issues/196#issuecomment-808866505
     sq1->evalAsync<kp::OpAlgoDispatch>(algo1);
     sq2->evalAsync<kp::OpAlgoDispatch>(algo2);
 
@@ -232,8 +234,10 @@ TEST(TestAsyncOperations, TestManagerAsyncExecutionTimeout)
 
     // AMD Drivers in Windows may see an error in this line due to timeout.
     // In order to fix this, it requires a change on Windows registries.
-    // More details on this can be found here: https://docs.substance3d.com/spdoc/gpu-drivers-crash-with-long-computations-128745489.html
-    // Context on solution discussed in github: https://github.com/KomputeProject/kompute/issues/196#issuecomment-808866505
+    // More details on this can be found here:
+    // https://docs.substance3d.com/spdoc/gpu-drivers-crash-with-long-computations-128745489.html
+    // Context on solution discussed in github:
+    // https://github.com/KomputeProject/kompute/issues/196#issuecomment-808866505
     sq1->evalAsync<kp::OpAlgoDispatch>(algo1);
     sq2->evalAsync<kp::OpAlgoDispatch>(algo2);
 
@@ -245,7 +249,8 @@ TEST(TestAsyncOperations, TestManagerAsyncExecutionTimeout)
       std::chrono::duration_cast<std::chrono::nanoseconds>(endSync - startSync)
         .count();
 
-    // The time should several orders of magnitude smaller (in this 100k instead of 1m ns)
+    // The time should be several orders of magnitude smaller (in this case
+    // 100k instead of 1m ns)
     EXPECT_LT(duration, 100000);
 
     sq1->evalAsync<kp::OpTensorSyncLocal>({ tensorA, tensorB });
diff --git a/test/TestLogisticRegression.cpp b/test/TestLogisticRegression.cpp
index 9b736213..b1ffaef8 100644
--- a/test/TestLogisticRegression.cpp
+++ b/test/TestLogisticRegression.cpp
@@ -126,8 +126,8 @@ TEST(TestLogisticRegression, TestMainLogisticRegressionManualCopy)
                             kp::shader_data::
                               shaders_glsl_logisticregression_comp_spv_len));
 
-        std::shared_ptr<kp::Algorithm> algorithm =
-          mgr.algorithm(params, spirv, kp::Workgroup(), std::vector<float>({ 5.0 }));
+        std::shared_ptr<kp::Algorithm> algorithm = mgr.algorithm(
+          params, spirv, kp::Workgroup(), std::vector<float>({ 5.0 }));
 
         std::shared_ptr<kp::Sequence> sq =
           mgr.sequence()
diff --git a/test/TestManager.cpp b/test/TestManager.cpp
index 21edc72c..e882421f 100644
--- a/test/TestManager.cpp
+++ b/test/TestManager.cpp
@@ -87,7 +87,8 @@ TEST(TestManager, TestClearDestroy)
 {
         std::shared_ptr<kp::TensorT<float>> tensorLHS = mgr.tensor({ 0, 1, 2 });
         std::shared_ptr<kp::TensorT<float>> tensorRHS = mgr.tensor({ 2, 4, 6 });
-        std::shared_ptr<kp::TensorT<float>> tensorOutput = mgr.tensor({ 0, 0, 0 });
+        std::shared_ptr<kp::TensorT<float>> tensorOutput =
+          mgr.tensor({ 0, 0, 0 });
 
         std::vector<std::shared_ptr<kp::Tensor>> params = { tensorLHS,
                                                             tensorRHS,
diff --git a/test/TestMultipleAlgoExecutions.cpp b/test/TestMultipleAlgoExecutions.cpp
index 40d190c6..f554f54f 100644
--- a/test/TestMultipleAlgoExecutions.cpp
+++ b/test/TestMultipleAlgoExecutions.cpp
@@ -53,11 +53,8 @@ TEST(TestMultipleAlgoExecutions, TestEndToEndFunctionality)
     std::vector<float> pushConstsA({ 2.0 });
     std::vector<float> pushConstsB({ 3.0 });
 
-    auto algorithm = mgr.algorithm(params,
-                                   compileSource(shader),
-                                   workgroup,
-                                   specConsts,
-                                   pushConstsA);
+    auto algorithm = mgr.algorithm(
+      params, compileSource(shader), workgroup, specConsts, pushConstsA);
 
     // 3. Run operation with string shader synchronously
     mgr.sequence()
@@ -97,11 +94,11 @@ TEST(TestMultipleAlgoExecutions, SingleSequenceRecord)
 {
     // A sharedMemoryBarrier is required as the shader is not thread-safe
     std::shared_ptr<kp::OpMemoryBarrier> shaderBarrier{
-       new kp::OpMemoryBarrier({ tensorA },
-                               vk::AccessFlagBits::eTransferRead,
-                               vk::AccessFlagBits::eShaderWrite,
-                               vk::PipelineStageFlagBits::eComputeShader,
-                               vk::PipelineStageFlagBits::eComputeShader)
+        new kp::OpMemoryBarrier({ tensorA },
+                                vk::AccessFlagBits::eTransferRead,
+                                vk::AccessFlagBits::eShaderWrite,
+                                vk::PipelineStageFlagBits::eComputeShader,
+                                vk::PipelineStageFlagBits::eComputeShader)
     };
 
     mgr.sequence()
@@ -266,11 +263,8 @@ TEST(TestMultipleAlgoExecutions, TestAlgorithmUtilFunctions)
     std::vector<float> specConsts({ 2 });
     std::vector<float> pushConsts({ 2.0 });
 
-    auto algorithm = mgr.algorithm(params,
-                                   compileSource(shader),
-                                   workgroup,
-                                   specConsts,
-                                   pushConsts);
+    auto algorithm = mgr.algorithm(
+      params, compileSource(shader), workgroup, specConsts, pushConsts);
 
     EXPECT_EQ(algorithm->getWorkgroup(), workgroup);
     EXPECT_EQ(algorithm->getPushConstants(), pushConsts);
diff --git a/test/TestPushConstant.cpp b/test/TestPushConstant.cpp
index 4742cd18..c305f089 100644
--- a/test/TestPushConstant.cpp
+++ b/test/TestPushConstant.cpp
@@ -44,8 +44,10 @@ TEST(TestPushConstants, TestConstantsAlgoDispatchOverride)
         // We need to run this in sequence to avoid race condition
         // We can't use atomicAdd as swiftshader doesn't support it for
         // float
-        sq->eval<kp::OpAlgoDispatch>(algo, std::vector<float>{ 0.1, 0.2, 0.3 });
-        sq->eval<kp::OpAlgoDispatch>(algo, std::vector<float>{ 0.3, 0.2, 0.1 });
+        sq->eval<kp::OpAlgoDispatch>(algo,
+                                     std::vector<float>{ 0.1, 0.2, 0.3 });
+        sq->eval<kp::OpAlgoDispatch>(algo,
+                                     std::vector<float>{ 0.3, 0.2, 0.1 });
         sq->eval<kp::OpTensorSyncLocal>({ tensor });
 
         EXPECT_EQ(tensor->vector(), std::vector<float>({ 0.4, 0.4, 0.4 }));
@@ -90,7 +92,8 @@ TEST(TestPushConstants, TestConstantsAlgoDispatchNoOverride)
         // We can't use atomicAdd as swiftshader doesn't support it for
         // float
         sq->eval<kp::OpAlgoDispatch>(algo);
-        sq->eval<kp::OpAlgoDispatch>(algo, std::vector<float>{ 0.3, 0.2, 0.1 });
+        sq->eval<kp::OpAlgoDispatch>(algo,
+                                     std::vector<float>{ 0.3, 0.2, 0.1 });
         sq->eval<kp::OpTensorSyncLocal>({ tensor });
 
         EXPECT_EQ(tensor->vector(), std::vector<float>({ 0.4, 0.4, 0.4 }));
@@ -156,22 +159,22 @@ TEST(TestPushConstants, TestConstantsWrongSize)
 //       pa[1] += pcs.y;
 //       pa[2] += pcs.z;
 //     })");
-// 
+//
 //     std::vector<uint32_t> spirv = compileSource(shader);
-// 
+//
 //     std::shared_ptr<kp::Sequence> sq = nullptr;
-// 
+//
 //     {
 //         kp::Manager mgr;
-// 
+//
 //         std::shared_ptr<kp::TensorT<float>> tensor =
 //           mgr.tensor({ 0, 0, 0 });
-// 
+//
 //         std::shared_ptr<kp::Algorithm> algo = mgr.algorithm(
 //           { tensor }, spirv, kp::Workgroup({ 1 }), {}, { 0.0 });
-// 
+//
 //         sq = mgr.sequence()->record<kp::OpTensorSyncDevice>({ tensor });
-// 
+//
 //         EXPECT_THROW(sq->record<kp::OpAlgoDispatch>(
 //                        algo, std::vector<float>{ 1, 2, 3 }),
 //                      std::runtime_error);
@@ -197,7 +200,8 @@ TEST(TestPushConstants, TestConstantsMixedTypes)
         pa[2] += pcs.z;
     })");
 
-    struct TestConsts{
+    struct TestConsts
+    {
         float x;
         uint32_t y;
         int32_t z;
@@ -213,16 +217,19 @@ TEST(TestPushConstants, TestConstantsMixedTypes)
         std::shared_ptr<kp::TensorT<float>> tensor =
           mgr.tensorT<float>({ 0, 0, 0 });
 
-        std::shared_ptr<kp::Algorithm> algo = mgr.algorithm<float, TestConsts>(
-          { tensor }, spirv, kp::Workgroup({ 1 }), {}, {{ 0, 0, 0 }});
+        std::shared_ptr<kp::Algorithm> algo =
+          mgr.algorithm<float, TestConsts>(
+            { tensor }, spirv, kp::Workgroup({ 1 }), {}, { { 0, 0, 0 } });
 
         sq = mgr.sequence()->eval<kp::OpTensorSyncDevice>({ tensor });
 
         // We need to run this in sequence to avoid race condition
         // We can't use atomicAdd as swiftshader doesn't support it for
         // float
-        sq->eval<kp::OpAlgoDispatch>(algo, std::vector<TestConsts>{{ 15.32, 2147483650, 10 }});
-        sq->eval<kp::OpAlgoDispatch>(algo, std::vector<TestConsts>{{ 30.32, 2147483650, -3 }});
+        sq->eval<kp::OpAlgoDispatch>(
+          algo, std::vector<TestConsts>{ { 15.32, 2147483650, 10 } });
+        sq->eval<kp::OpAlgoDispatch>(
+          algo, std::vector<TestConsts>{ { 30.32, 2147483650, -3 } });
         sq->eval<kp::OpTensorSyncLocal>({ tensor });
 
         EXPECT_EQ(tensor->vector(), std::vector<float>({ 45.64, 1300, 7 }));
@@ -258,16 +265,19 @@ TEST(TestPushConstants, TestConstantsInt)
         std::shared_ptr<kp::TensorT<int32_t>> tensor =
           mgr.tensorT<int32_t>({ -1, -1, -1 });
 
-        std::shared_ptr<kp::Algorithm> algo = mgr.algorithm<float, int32_t>(
-          { tensor }, spirv, kp::Workgroup({ 1 }), {}, {{ 0, 0, 0 }});
+        std::shared_ptr<kp::Algorithm> algo =
+          mgr.algorithm<float, int32_t>(
+            { tensor }, spirv, kp::Workgroup({ 1 }), {}, { { 0, 0, 0 } });
 
         sq = mgr.sequence()->eval<kp::OpTensorSyncDevice>({ tensor });
 
         // We need to run this in sequence to avoid race condition
         // We can't use atomicAdd as swiftshader doesn't support it for
         // float
-        sq->eval<kp::OpAlgoDispatch>(algo, std::vector<int32_t>{{ -1, -1, -1 }});
-        sq->eval<kp::OpAlgoDispatch>(algo, std::vector<int32_t>{{ -1, -1, -1 }});
+        sq->eval<kp::OpAlgoDispatch>(
+          algo, std::vector<int32_t>{ { -1, -1, -1 } });
+        sq->eval<kp::OpAlgoDispatch>(
+          algo, std::vector<int32_t>{ { -1, -1, -1 } });
         sq->eval<kp::OpTensorSyncLocal>({ tensor });
 
         EXPECT_EQ(tensor->vector(), std::vector<int32_t>({ -3, -3, -3 }));
@@ -303,19 +313,25 @@ TEST(TestPushConstants, TestConstantsUnsignedInt)
         std::shared_ptr<kp::TensorT<uint32_t>> tensor =
          mgr.tensorT<uint32_t>({ 0, 0, 0 });
 
-        std::shared_ptr<kp::Algorithm> algo = mgr.algorithm<float, uint32_t>(
-          { tensor }, spirv, kp::Workgroup({ 1 }), {}, {{ 0, 0, 0 }});
+        std::shared_ptr<kp::Algorithm> algo =
+          mgr.algorithm<float, uint32_t>(
+            { tensor }, spirv, kp::Workgroup({ 1 }), {}, { { 0, 0, 0 } });
 
         sq = mgr.sequence()->eval<kp::OpTensorSyncDevice>({ tensor });
 
         // We need to run this in sequence to avoid race condition
         // We can't use atomicAdd as swiftshader doesn't support it for
         // float
-        sq->eval<kp::OpAlgoDispatch>(algo, std::vector<uint32_t>{{ 2147483650, 2147483650, 2147483650 }});
-        sq->eval<kp::OpAlgoDispatch>(algo, std::vector<uint32_t>{{ 5, 5, 5 }});
+        sq->eval<kp::OpAlgoDispatch>(
+          algo,
+          std::vector<uint32_t>{ { 2147483650, 2147483650, 2147483650 } });
+        sq->eval<kp::OpAlgoDispatch>(algo,
+                                     std::vector<uint32_t>{ { 5, 5, 5 } });
         sq->eval<kp::OpTensorSyncLocal>({ tensor });
 
-        EXPECT_EQ(tensor->vector(), std::vector<uint32_t>({ 2147483655, 2147483655, 2147483655 }));
+        EXPECT_EQ(
+          tensor->vector(),
+          std::vector<uint32_t>({ 2147483655, 2147483655, 2147483655 }));
         }
     }
 }
@@ -349,18 +365,29 @@ TEST(TestPushConstants, TestConstantsDouble)
           mgr.tensorT<double>({ 0, 0, 0 });
 
         std::shared_ptr<kp::Algorithm> algo = mgr.algorithm<float, double>(
-          { tensor }, spirv, kp::Workgroup({ 1 }), {}, {{ 0, 0, 0 }});
+          { tensor }, spirv, kp::Workgroup({ 1 }), {}, { { 0, 0, 0 } });
 
         sq = mgr.sequence()->eval<kp::OpTensorSyncDevice>({ tensor });
 
         // We need to run this in sequence to avoid race condition
        // We can't use atomicAdd as swiftshader doesn't support it for
         // float
-        sq->eval<kp::OpAlgoDispatch>(algo, std::vector<double>{{ 1.1111222233334444, 2.1111222233334444, 3.1111222233334444 }});
-        sq->eval<kp::OpAlgoDispatch>(algo, std::vector<double>{{ 1.1111222233334444, 2.1111222233334444, 3.1111222233334444 }});
+        sq->eval<kp::OpAlgoDispatch>(
+          algo,
+          std::vector<double>{ { 1.1111222233334444,
+                                 2.1111222233334444,
+                                 3.1111222233334444 } });
+        sq->eval<kp::OpAlgoDispatch>(
+          algo,
+          std::vector<double>{ { 1.1111222233334444,
+                                 2.1111222233334444,
+                                 3.1111222233334444 } });
         sq->eval<kp::OpTensorSyncLocal>({ tensor });
 
-        EXPECT_EQ(tensor->vector(), std::vector<double>({ 2.2222444466668888, 4.2222444466668888, 6.2222444466668888 }));
+        EXPECT_EQ(tensor->vector(),
+                  std::vector<double>({ 2.2222444466668888,
+                                        4.2222444466668888,
+                                        6.2222444466668888 }));
     }
 }
diff --git a/test/TestSpecializationConstant.cpp b/test/TestSpecializationConstant.cpp
index abc507e7..e7bb3926 100644
--- a/test/TestSpecializationConstant.cpp
+++ b/test/TestSpecializationConstant.cpp
@@ -101,4 +101,3 @@ TEST(TestSpecializationConstants, TestConstantsInt)
         }
     }
 }
-
diff --git a/test/TestTensor.cpp b/test/TestTensor.cpp
index 32c870a7..e6af04f2 100644
--- a/test/TestTensor.cpp
+++ b/test/TestTensor.cpp
@@ -33,7 +33,8 @@ TEST(TestTensor, DataTypes)
     {
         std::vector<uint32_t> vec{ 0, 1, 2 };
         std::shared_ptr<kp::TensorT<uint32_t>> tensor = mgr.tensorT(vec);
-        EXPECT_EQ(tensor->dataType(), kp::Tensor::TensorDataTypes::eUnsignedInt);
+        EXPECT_EQ(tensor->dataType(),
+                  kp::Tensor::TensorDataTypes::eUnsignedInt);
     }
 
     {
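
Usage sketch (illustrative only, not part of the patch): the hunks above only
reflow the templated push-constant API without changing behaviour. A minimal
example of how that API is driven, assuming a kp::Manager as shown in the
hunks and an already compiled SPIR-V module; the SPIR-V words and tensor
values below are placeholder assumptions:

    #include <kompute/Kompute.hpp>

    #include <memory>
    #include <vector>

    int main()
    {
        kp::Manager mgr;

        // Three floats that the dispatched shader would accumulate into
        std::shared_ptr<kp::TensorT<float>> tensor =
          mgr.tensorT<float>({ 0, 0, 0 });

        // Assumed to already hold compiled compute-shader SPIR-V words
        std::vector<uint32_t> spirv = { /* compiled SPIR-V */ };

        // Push constants default to float; their total size is fixed here
        std::shared_ptr<kp::Algorithm> algo = mgr.algorithm(
          { tensor }, spirv, kp::Workgroup({ 1 }), {},
          std::vector<float>{ 0.0 });

        std::shared_ptr<kp::Sequence> sq = mgr.sequence();
        sq->eval<kp::OpTensorSyncDevice>({ tensor });

        // A per-dispatch override must keep the same total size, otherwise
        // Algorithm::setPushConstants throws std::runtime_error (see the
        // totalSize != previousTotalSize check in Algorithm.hpp above)
        sq->eval<kp::OpAlgoDispatch>(algo, std::vector<float>{ 2.0 });
        sq->eval<kp::OpTensorSyncLocal>({ tensor });

        return 0;
    }

The size check matters because the pipeline layout's push-constant range is
baked in at createPipeline() time, so later overrides can only swap values,
never resize the block.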