Merge pull request #286 from COM8/clang-format
Added .clang-format file and formatted everything
axsaucedo authored May 4, 2022
2 parents f731f2e + 21b9943 commit de46d30
Showing 35 changed files with 1,189 additions and 1,042 deletions.
5 changes: 5 additions & 0 deletions .clang-format
@@ -0,0 +1,5 @@
---
BasedOnStyle: Mozilla
IndentWidth: 4

...
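
The configuration inherits clang-format's Mozilla base style and only overrides the indent width (the Mozilla base style defaults to 2-space indentation). As a rough illustration, and using a hypothetical helper that is not part of this commit, running clang-format with -style=file under this configuration yields the pattern visible throughout the reformatted files below: the return type of a top-level definition breaks onto its own line, the function brace sits on its own line, control-statement braces stay attached, and bodies indent by 4 spaces.

// Hypothetical example (not from the repository), formatted as this
// .clang-format configuration would render it.
#include <vector>

static float
sumPositive(const std::vector<float>& values)
{
    // 4-space indentation from the IndentWidth override
    float total = 0.0f;
    for (float v : values) {
        if (v > 0.0f) {
            total += v;
        }
    }
    return total;
}
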
4 changes: 3 additions & 1 deletion Makefile
@@ -198,7 +198,9 @@ win_build_xxd:
cd external/bin/ && gcc.exe -o xxd.exe xxd.c -DCYGWIN

format:
$(CLANG_FORMAT_BIN) -i -style="{BasedOnStyle: mozilla, IndentWidth: 4}" src/*.cpp src/include/kompute/*.hpp test/*cpp
for val in "examples single_include src test" ; do \
find $$val -depth -iname *.h -or -iname *.c -or -iname *.hpp -or -iname *.cpp | grep -v "shaders" | xargs $(CLANG_FORMAT_BIN) -style=file -i; \
done

static_scan:
cppcheck --project=build/compile_commands.json -iexternal/
121 changes: 63 additions & 58 deletions examples/android/android-simple/app/src/main/cpp/KomputeJniNative.cpp
@@ -12,104 +12,109 @@
// See the License for the specific language governing permissions and
// limitations under the License.


// Includes the Jni utilities for Android to be able to create the
// relevant bindings for java, including JNIEXPORT, JNICALL , and
// Includes the Jni utilities for Android to be able to create the
// relevant bindings for java, including JNIEXPORT, JNICALL , and
// other "j-variables".
#include <jni.h>

// The ML class exposing the Kompute ML workflow for training and
// The ML class exposing the Kompute ML workflow for training and
// prediction of inference data.
#include "KomputeModelML.hpp"

// Allows us to use the C++ sleep function to wait when loading the
// Allows us to use the C++ sleep function to wait when loading the
// Vulkan library in android
#include <unistd.h>

#ifndef KOMPUTE_VK_INIT_RETRIES
#define KOMPUTE_VK_INIT_RETRIES 5
#endif

static std::vector<float> jfloatArrayToVector(JNIEnv *env, const jfloatArray & fromArray) {
float *inCArray = env->GetFloatArrayElements(fromArray, NULL);
if (NULL == inCArray) return std::vector<float>();
static std::vector<float>
jfloatArrayToVector(JNIEnv* env, const jfloatArray& fromArray)
{
float* inCArray = env->GetFloatArrayElements(fromArray, NULL);
if (NULL == inCArray)
return std::vector<float>();
int32_t length = env->GetArrayLength(fromArray);

std::vector<float> outVector(inCArray, inCArray + length);
return outVector;
}

static jfloatArray vectorToJFloatArray(JNIEnv *env, const std::vector<float> & fromVector) {
static jfloatArray
vectorToJFloatArray(JNIEnv* env, const std::vector<float>& fromVector)
{
jfloatArray ret = env->NewFloatArray(fromVector.size());
if (NULL == ret) return NULL;
if (NULL == ret)
return NULL;
env->SetFloatArrayRegion(ret, 0, fromVector.size(), fromVector.data());
return ret;
}

extern "C" {
extern "C"
{

JNIEXPORT jboolean JNICALL
Java_com_ethicalml_kompute_KomputeJni_initVulkan(JNIEnv *env, jobject thiz) {
JNIEXPORT jboolean JNICALL
Java_com_ethicalml_kompute_KomputeJni_initVulkan(JNIEnv* env, jobject thiz)
{

KP_LOG_INFO("Initialising vulkan");
KP_LOG_INFO("Initialising vulkan");

uint32_t totalRetries = 0;
uint32_t totalRetries = 0;

while (totalRetries < KOMPUTE_VK_INIT_RETRIES) {
KP_LOG_INFO("VULKAN LOAD TRY NUMBER: %u", totalRetries);
if(InitVulkan()) {
break;
while (totalRetries < KOMPUTE_VK_INIT_RETRIES) {
KP_LOG_INFO("VULKAN LOAD TRY NUMBER: %u", totalRetries);
if (InitVulkan()) {
break;
}
sleep(1);
totalRetries++;
}
sleep(1);
totalRetries++;
}

return totalRetries < KOMPUTE_VK_INIT_RETRIES;
}


JNIEXPORT jfloatArray JNICALL
Java_com_ethicalml_kompute_KomputeJni_kompute(
JNIEnv *env,
jobject thiz,
jfloatArray xiJFloatArr,
jfloatArray xjJFloatArr,
jfloatArray yJFloatArr) {
return totalRetries < KOMPUTE_VK_INIT_RETRIES;
}

KP_LOG_INFO("Creating manager");
JNIEXPORT jfloatArray JNICALL
Java_com_ethicalml_kompute_KomputeJni_kompute(JNIEnv* env,
jobject thiz,
jfloatArray xiJFloatArr,
jfloatArray xjJFloatArr,
jfloatArray yJFloatArr)
{

std::vector<float> xiVector = jfloatArrayToVector(env, xiJFloatArr);
std::vector<float> xjVector = jfloatArrayToVector(env, xjJFloatArr);
std::vector<float> yVector = jfloatArrayToVector(env, yJFloatArr);
KP_LOG_INFO("Creating manager");

KomputeModelML kml;
kml.train(yVector, xiVector, xjVector);
std::vector<float> xiVector = jfloatArrayToVector(env, xiJFloatArr);
std::vector<float> xjVector = jfloatArrayToVector(env, xjJFloatArr);
std::vector<float> yVector = jfloatArrayToVector(env, yJFloatArr);

std::vector<float> pred = kml.predict(xiVector, xjVector);
KomputeModelML kml;
kml.train(yVector, xiVector, xjVector);

return vectorToJFloatArray(env, pred);
}
std::vector<float> pred = kml.predict(xiVector, xjVector);

JNIEXPORT jfloatArray JNICALL
Java_com_ethicalml_kompute_KomputeJni_komputeParams(
JNIEnv *env,
jobject thiz,
jfloatArray xiJFloatArr,
jfloatArray xjJFloatArr,
jfloatArray yJFloatArr) {
return vectorToJFloatArray(env, pred);
}

KP_LOG_INFO("Creating manager");
JNIEXPORT jfloatArray JNICALL
Java_com_ethicalml_kompute_KomputeJni_komputeParams(JNIEnv* env,
jobject thiz,
jfloatArray xiJFloatArr,
jfloatArray xjJFloatArr,
jfloatArray yJFloatArr)
{

std::vector<float> xiVector = jfloatArrayToVector(env, xiJFloatArr);
std::vector<float> xjVector = jfloatArrayToVector(env, xjJFloatArr);
std::vector<float> yVector = jfloatArrayToVector(env, yJFloatArr);
KP_LOG_INFO("Creating manager");

KomputeModelML kml;
kml.train(yVector, xiVector, xjVector);
std::vector<float> xiVector = jfloatArrayToVector(env, xiJFloatArr);
std::vector<float> xjVector = jfloatArrayToVector(env, xjJFloatArr);
std::vector<float> yVector = jfloatArrayToVector(env, yJFloatArr);

std::vector<float> params = kml.get_params();
KomputeModelML kml;
kml.train(yVector, xiVector, xjVector);

return vectorToJFloatArray(env, params);
}
std::vector<float> params = kml.get_params();

return vectorToJFloatArray(env, params);
}
}
43 changes: 24 additions & 19 deletions examples/android/android-simple/app/src/main/cpp/KomputeModelML.cpp
100755 → 100644
@@ -1,15 +1,15 @@

#include "KomputeModelML.hpp"

KomputeModelML::KomputeModelML() {
KomputeModelML::KomputeModelML() {}

}

KomputeModelML::~KomputeModelML() {
KomputeModelML::~KomputeModelML() {}

}

void KomputeModelML::train(std::vector<float> yData, std::vector<float> xIData, std::vector<float> xJData) {
void
KomputeModelML::train(std::vector<float> yData,
std::vector<float> xIData,
std::vector<float> xJData)
{

std::vector<float> zerosData;

@@ -42,17 +42,19 @@ void KomputeModelML::train(std::vector<float> yData, std::vector<float> xIData,
bIn, bOut, lOut };

std::vector<uint32_t> spirv = std::vector<uint32_t>(
(uint32_t*)kp::shader_data::shaders_glsl_logisticregression_comp_spv,
(uint32_t*)(kp::shader_data::shaders_glsl_logisticregression_comp_spv +
kp::shader_data::shaders_glsl_logisticregression_comp_spv_len));

(uint32_t*)kp::shader_data::shaders_glsl_logisticregression_comp_spv,
(uint32_t*)(kp::shader_data::
shaders_glsl_logisticregression_comp_spv +
kp::shader_data::
shaders_glsl_logisticregression_comp_spv_len));

std::shared_ptr<kp::Algorithm> algorithm = mgr.algorithm(
params, spirv, kp::Workgroup({ 5 }), std::vector<float>({ 5.0 }));
params, spirv, kp::Workgroup({ 5 }), std::vector<float>({ 5.0 }));

mgr.sequence()->eval<kp::OpTensorSyncDevice>(params);

std::shared_ptr<kp::Sequence> sq = mgr.sequence()
std::shared_ptr<kp::Sequence> sq =
mgr.sequence()
->record<kp::OpTensorSyncDevice>({ wIn, bIn })
->record<kp::OpAlgoDispatch>(algorithm)
->record<kp::OpTensorSyncLocal>({ wOutI, wOutJ, bOut, lOut });
@@ -79,7 +81,9 @@ void KomputeModelML::train(std::vector<float> yData, std::vector<float> xIData,
}
}

std::vector<float> KomputeModelML::predict(std::vector<float> xI, std::vector<float> xJ) {
std::vector<float>
KomputeModelML::predict(std::vector<float> xI, std::vector<float> xJ)
{

KP_LOG_INFO("Running prediction inference");

@@ -93,9 +97,8 @@ std::vector<float> KomputeModelML::predict(std::vector<float> xI, std::vector<fl
for (size_t i = 0; i < xI.size(); i++) {
float xIVal = xI[i];
float xJVal = xJ[i];
float result = (xIVal * this->mWeights[0]
+ xJVal * this->mWeights[1]
+ this->mBias[0]);
float result = (xIVal * this->mWeights[0] + xJVal * this->mWeights[1] +
this->mBias[0]);

// Instead of using sigmoid we'll just return full numbers
float var = result > 0 ? 1 : 0;
@@ -107,13 +110,15 @@ std::vector<float> KomputeModelML::predict(std::vector<float> xI, std::vector<fl
return retVector;
}

std::vector<float> KomputeModelML::get_params() {
std::vector<float>
KomputeModelML::get_params()
{

KP_LOG_INFO("Displaying results");

std::vector<float> retVector;

if(this->mWeights.size() + this->mBias.size() == 0) {
if (this->mWeights.size() + this->mBias.size() == 0) {
return retVector;
}

18 changes: 10 additions & 8 deletions examples/android/android-simple/app/src/main/cpp/KomputeModelML.hpp
100755 → 100644
@@ -2,28 +2,30 @@
#ifndef KOMPUTEMODELML_HPP
#define KOMPUTEMODELML_HPP

#include <vector>
#include <string>
#include <memory>
#include <string>
#include <vector>

#include "kompute/Kompute.hpp"

class KomputeModelML {
class KomputeModelML
{

public:
public:
KomputeModelML();
virtual ~KomputeModelML();

void train(std::vector<float> yData, std::vector<float> xIData, std::vector<float> xJData);
void train(std::vector<float> yData,
std::vector<float> xIData,
std::vector<float> xJData);

std::vector<float> predict(std::vector<float> xI, std::vector<float> xJ);

std::vector<float> get_params();

private:
private:
std::vector<float> mWeights;
std::vector<float> mBias;

};

static std::string LR_SHADER = R"(
@@ -83,4 +85,4 @@ void main() {
}
)";

#endif //ANDROID_SIMPLE_KOMPUTEMODELML_HPP
#endif // ANDROID_SIMPLE_KOMPUTEMODELML_HPP
44 changes: 25 additions & 19 deletions examples/array_multiplication/src/Main.cpp
100755 → 100644
@@ -5,23 +5,27 @@

#include "kompute/Kompute.hpp"

static
std::vector<uint32_t>
compileSource(
const std::string& source)
static std::vector<uint32_t>
compileSource(const std::string& source)
{
std::ofstream fileOut("tmp_kp_shader.comp");
fileOut << source;
fileOut.close();
if (system(std::string("glslangValidator -V tmp_kp_shader.comp -o tmp_kp_shader.comp.spv").c_str()))
fileOut << source;
fileOut.close();
if (system(
std::string(
"glslangValidator -V tmp_kp_shader.comp -o tmp_kp_shader.comp.spv")
.c_str()))
throw std::runtime_error("Error running glslangValidator command");
std::ifstream fileStream("tmp_kp_shader.comp.spv", std::ios::binary);
std::vector<char> buffer;
buffer.insert(buffer.begin(), std::istreambuf_iterator<char>(fileStream), {});
return {(uint32_t*)buffer.data(), (uint32_t*)(buffer.data() + buffer.size())};
buffer.insert(
buffer.begin(), std::istreambuf_iterator<char>(fileStream), {});
return { (uint32_t*)buffer.data(),
(uint32_t*)(buffer.data() + buffer.size()) };
}

int main()
int
main()
{
#if KOMPUTE_ENABLE_SPDLOG
spdlog::set_level(
@@ -53,21 +57,23 @@ int main()
}
)");

std::vector<std::shared_ptr<kp::Tensor>> params = { tensorInA, tensorInB, tensorOut };
std::vector<std::shared_ptr<kp::Tensor>> params = { tensorInA,
tensorInB,
tensorOut };

std::shared_ptr<kp::Algorithm> algo = mgr.algorithm(params, compileSource(shader));
std::shared_ptr<kp::Algorithm> algo =
mgr.algorithm(params, compileSource(shader));

mgr.sequence()
->record<kp::OpTensorSyncDevice>(params)
->record<kp::OpAlgoDispatch>(algo)
->record<kp::OpTensorSyncLocal>(params)
->eval();
->record<kp::OpTensorSyncDevice>(params)
->record<kp::OpAlgoDispatch>(algo)
->record<kp::OpTensorSyncLocal>(params)
->eval();

// prints "Output { 0 4 12 }"
std::cout<< "Output: { ";
std::cout << "Output: { ";
for (const float& elem : tensorOut->vector()) {
std::cout << elem << " ";
std::cout << elem << " ";
}
std::cout << "}" << std::endl;
}

(The remaining changed files in this commit are not shown here.)
