diff --git a/llvm_passes/CustomTensorOperatorInstSelector.cpp b/llvm_passes/CustomTensorOperatorInstSelector.cpp
index a049e8ef..7fb174ca 100644
--- a/llvm_passes/CustomTensorOperatorInstSelector.cpp
+++ b/llvm_passes/CustomTensorOperatorInstSelector.cpp
@@ -69,24 +69,6 @@ class CustomTensorOperatorInstSelector : public HardwareFIInstSelector {
         // Operator number to do FI
         int FIOperatorCount;
 
-        Operator(std::string name, std::string count) {
-
-            OperatorName = name;
-            FIOperatorCount = atoll(count.c_str());
-            OperatorCount = 0;
-            OperatorNumber = getOperatorNumber(name);
-
-            if (OperatorNumber == -1) {
-                std::cout<<"Operator name "<< OperatorName.c_str() <<
-                    "not found.\n";
-                std::cout<<"Please use the following operator name(s): \
-                    conv, relu, maxpool, matmul, add, avgpool, all, and softmax.";
-                assert(false && "Invalid input operator name");
-            }
-
-            assert(FIOperatorCount >= 0 && "Invalid input FI operator number");
-        }
-
         // Get unique Id corresponding to the ONNX operator.
         static int64_t getOperatorNumber(std::string name) {
@@ -96,8 +78,10 @@ class CustomTensorOperatorInstSelector : public HardwareFIInstSelector {
             strcpy(opname, name.c_str());
 
+            std::cout<<"OperatorName: "<<opname<<"\n";
- ONNXOperatorId = {
+            std::map<std::string, int64_t> ONNXOperatorId = {
                 {"conv", 1986948931},
                 {"relu", 1970038098},
                 {"maxpool", 30521821366870349},
@@ -113,6 +97,24 @@ class CustomTensorOperatorInstSelector : public HardwareFIInstSelector {
             return ONNXOperatorId[opname];
         }
 
+        Operator(std::string name, std::string count) {
+
+            OperatorName = name;
+            FIOperatorCount = atoll(count.c_str());
+            OperatorCount = 0;
+            OperatorNumber = getOperatorNumber(name);
+
+            if (OperatorNumber == -1) {
+                std::cout<<"Operator name "<< OperatorName.c_str() <<
+                    " not found.\n";
+                std::cout<<"Please use the following operator name(s):\
+                    conv, relu, maxpool, matmul, add, avgpool, all, and softmax.";
+                assert(false && "Invalid input operator name");
+            }
+
+            assert(FIOperatorCount >= 0 && "Invalid input FI operator number");
+        }
+
         bool doFaultInjection(){
 
             OperatorCount++;
@@ -262,7 +264,7 @@ class CustomTensorOperatorInstSelector : public HardwareFIInstSelector {
     virtual void getCompileTimeInfo(std::map<std::string, std::string> &info) {
         info["failure_class"] = "HardwareFault";
         info["failure_mode"] = "CustomTensorOperator";
-        info["targets"] = "
 *instset, std::map<Instruction*, std::list< int >* > *instregmap) {
     std::error_code err;
-    raw_fd_ostream logFile(llfilogfile.c_str(), err, sys::fs::F_Append);
+    raw_fd_ostream logFile(llfilogfile.c_str(), err, sys::fs::OF_Append);
     for (std::set<Instruction*>::const_iterator inst_it = instset->begin();
          inst_it != instset->end(); ++inst_it) {
diff --git a/llvm_passes/core/ProfilingPass.cpp b/llvm_passes/core/ProfilingPass.cpp
index 6cc75224..61f93e5b 100644
--- a/llvm_passes/core/ProfilingPass.cpp
+++ b/llvm_passes/core/ProfilingPass.cpp
@@ -45,7 +45,7 @@ bool ProfilingPass::runOnModule(Module &M) {
   ctrl->getFIInstRegsMap(&fi_inst_regs_map);
   //BEHROOZ:
   std::error_code err;
-  raw_fd_ostream logFile(llfilogfile.c_str(), err, sys::fs::F_Append);
+  raw_fd_ostream logFile(llfilogfile.c_str(), err, sys::fs::OF_Append);
 
   for (std::map<Instruction*, std::list< int >* >::const_iterator
       inst_reg_it = fi_inst_regs_map->begin();
diff --git a/sample_programs/memcpy1/input.yaml b/sample_programs/memcpy1/input.yaml
index ca920253..990cf7a6 100644
--- a/sample_programs/memcpy1/input.yaml
+++ b/sample_programs/memcpy1/input.yaml
@@ -5,12 +5,14 @@ kernelOption:
 compileOption:
     instSelMethod:
-      - customInstselector:
-          include:
-            - InappropriateClose(API)
+      - insttype:
+          include:
+            - all
+          exclude:
+            - ret
 
-    regSelMethod: customregselector
-    customRegSelector: Automatic
+    regSelMethod: regloc
+    regloc: dstreg
 
 runOption:
     - run:
diff --git a/sample_programs/mnist/compile.sh b/sample_programs/mnist/compile.sh
index d3fda612..833ad247 100755
--- a/sample_programs/mnist/compile.sh
+++ b/sample_programs/mnist/compile.sh
@@ -3,7 +3,7 @@
 python3 $1.py
 
 printf "\n[Compile Script]: Convert TF model to LLVM IR\n"
 python3 -m tf2onnx.convert --saved-model $1.tf --output model.onnx
-python3 ../../tools/ExtendONNXModel.py model.onnx extendedmodel.onnx > expected_op_seq.txt
+python3 ../../tools/ExtendONNXModel.py --model_path ./model.onnx --output_model_path ./extendedmodel.onnx > expected_op_seq.txt
 onnx-mlir --EmitLLVMIR extendedmodel.onnx --instrument-onnx-ops="ALL" --InstrumentBeforeOp --InstrumentAfterOp
 mlir-translate -mlir-to-llvmir extendedmodel.onnx.mlir > model.mlir.ll
@@ -12,4 +12,3 @@
 clang -S -emit-llvm image.c -I$ONNX_MLIR_SRC/include -o main.ll
 llvm-link -o model.ll -S main.ll model.mlir.ll
 printf "\n[Compile Script]: Compilation complete\n"
-
diff --git a/sample_programs/mnist/image.c b/sample_programs/mnist/image.c
index 91b4a928..3f915419 100644
--- a/sample_programs/mnist/image.c
+++ b/sample_programs/mnist/image.c
@@ -13,11 +13,14 @@
 #include
 #include
 #include
+#include <sys/time.h>
 
 #define STB_IMAGE_IMPLEMENTATION
 #include "stb_image.h"
 
 #define RANK 2
+#define NUM_OUTPUT_CLASSES 10 // For MNIST, there are 10 possible digits.
+
 OMTensorList *run_main_graph(OMTensorList *);
 void export_layer_output_to_json(OMTensorList *, char*, char*);
@@ -42,24 +45,34 @@ int main(int argc, char *argv[]) {
   OMTensor *x1 = omTensorCreate(rgb_image, in_shape, RANK, ONNX_TYPE_FLOAT);
   OMTensor *img_list[1] = {x1};
   OMTensorList *input = omTensorListCreate(img_list, 1);
+
+  struct timeval start, end;
+  gettimeofday(&start, NULL);
 
   // Call the compiled onnx model function.
   OMTensorList *outputList = run_main_graph(input);
+  gettimeofday(&end, NULL);
+
+  double time_taken = end.tv_sec + end.tv_usec / 1e6 -
+                      start.tv_sec - start.tv_usec / 1e6; // in seconds
+
+
+  printf("Time taken to execute the model: %f\n", time_taken);
+
   // Export layer outputs to a JSON file
   export_layer_output_to_json(outputList, savefilename, output_seq);
 
-  // Get the last omt as output.
+  // Get the first omt as output.
   OMTensor *output = omTensorListGetOmtByIndex(outputList,
                          omTensorListGetSize(outputList) - 1);
   float *outputPtr = (float *)omTensorGetDataPtr(output);
 
-  // Print out elements of the last tensor
-  int64_t *shape = omTensorGetShape(output);
-  int64_t numElements = (int64_t) (omTensorGetNumElems(output) / shape[0]);
 
   printf("Final prediction for %s is: ", filename);
-  for (int i = 0; i < numElements; i++)
+  // Print its content, should be in softmax form
+  for (int i = 0; i < NUM_OUTPUT_CLASSES; i++)
     printf("%f ", outputPtr[i]);
+  printf("\n");
 
   stbi_image_free(rgb_image);
diff --git a/sample_programs/mnist/input.yaml b/sample_programs/mnist/input.yaml
index 70df6e67..d23b428b 100644
--- a/sample_programs/mnist/input.yaml
+++ b/sample_programs/mnist/input.yaml
@@ -4,8 +4,8 @@ compileOption:
           include:
             - CustomTensorOperator
           options:
-            - -layerNo=1;1
-            - -layerName=conv;relu
+            - -layerNo=0;0;0;0;0;0;0
+            - -layerName=conv;relu;matmul;maxpool;add;avgpool;softmax
 
     regSelMethod: regloc
     regloc: dstreg
@@ -23,8 +23,8 @@ compileOption:
 
 runOption:
     - run:
-        numOfRuns: 50
+        numOfRuns: 1000
         fi_type: bitflip
         window_len_multiple_startindex: 1
         window_len_multiple_endindex: 500
-        fi_max_multiple: 9
+        fi_max_multiple: 2