Skip to content

Commit

Permalink
added shufflenet, fixed profiling issue (#18)
Browse files Browse the repository at this point in the history
* added shufflenet, fixed profiling issue

* minor fixes
  • Loading branch information
AnushreeBannadabhavi authored Jul 16, 2022
1 parent 973a74c commit c3ce575
Show file tree
Hide file tree
Showing 11 changed files with 298 additions and 5 deletions.
2 changes: 1 addition & 1 deletion bin/injectfault.py
Original file line number Diff line number Diff line change
Expand Up @@ -289,7 +289,7 @@ def checkValues(key, val, var1 = None,var2 = None,var3 = None,var4 = None):
##BEHROOZ: Add max number of target locations
elif key == "fi_max_multiple":
assert isinstance(val, int)==True, key+" must be an integer in input.yaml"
assert int(val) >1, key+" must be greater than one in input.yaml"
assert int(val) >0, key+" must be greater than zero in input.yaml"
assert int(val) <=int(fi_max_multiple_default), key+" must be smaller than or equal to "+str(fi_max_multiple_default)+ " in input.yaml"
##==============================================================

Expand Down
13 changes: 12 additions & 1 deletion llvm_passes/core/Utils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,17 @@ Instruction *getTermInstofFunction(Function *func) {
return ret;
}

// Collect every function-terminating instruction of `func` into `exitinsts`.
// Unlike getTermInstofFunction, this gathers ALL exit points: returns,
// exception resumes, and unreachable terminators.
void getAllTermInstofFunction(Function *func, std::set<Instruction*> &exitinsts) {
  for (inst_iterator it = inst_begin(func), ie = inst_end(func); it != ie; ++it) {
    Instruction *inst = &*it;
    bool isExit = isa<ReturnInst>(inst) || isa<ResumeInst>(inst) ||
                  isa<UnreachableInst>(inst);
    if (isExit)
      exitinsts.insert(inst);
  }
}

void getProgramExitInsts(Module &M, std::set<Instruction*> &exitinsts) {
for (Module::iterator m_it = M.begin(); m_it != M.end(); ++m_it) {
if (!m_it->isDeclaration()) {
Expand All @@ -76,7 +87,7 @@ void getProgramExitInsts(Module &M, std::set<Instruction*> &exitinsts) {
}

Function* mainfunc = M.getFunction("main");
exitinsts.insert(getTermInstofFunction(mainfunc));
getAllTermInstofFunction(mainfunc, exitinsts);
}

Instruction *getInsertPtrforRegsofInst(Value *reg, Instruction *inst) {
Expand Down
2 changes: 1 addition & 1 deletion runtime_lib/FaultInjectionLib.c
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,7 @@ void _parseLLFIConfigFile() {
//==============================================================
/*BEHROOZ: Add multiple corrupted regs*/
} else if (strcmp(option, "fi_max_multiple") == 0){
assert(atoll(value) > 1 && "invalid fi_max_multiple in config file");
assert(atoll(value) > 0 && "invalid fi_max_multiple in config file");
config.fi_max_multiple = atoi(value);
} else if (strcmp(option, "fi_next_cycle") == 0){
assert(atoll(value) > 0 && "invalid fi_next_cycle in config file");
Expand Down
4 changes: 2 additions & 2 deletions runtime_lib/ProfilingLib.c
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@

#include "Utils.h"

static long long opcodecount[OPCODE_CYCLE_ARRAY_LEN] = {0};
static long long unsigned opcodecount[OPCODE_CYCLE_ARRAY_LEN] = {0};

void doProfiling(int opcode) {
assert(opcodecount[opcode] >= 0 &&
Expand All @@ -27,7 +27,7 @@ void endProfiling() {
getOpcodeExecCycleArray(OPCODE_CYCLE_ARRAY_LEN, opcode_cycle_arr);

unsigned i = 0;
long long total_cycle = 0;
long long unsigned total_cycle = 0;
for (i = 0; i < 100; ++i) {
assert(total_cycle >= 0 &&
"total dynamic instruction cycle too large to be handled by llfi");
Expand Down
13 changes: 13 additions & 0 deletions sample_programs/shufflenetv2_10/compile.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
printf "\n[Compile Script]: Convert TF model to LLVM IR\n"
onnx-mlir --EmitLLVMIR --instrument-onnx-ops="ALL" --InstrumentBeforeOp --InstrumentAfterOp $1.onnx
mlir-translate -mlir-to-llvmir $1.onnx.mlir > model.mlir.ll

printf "\n[Compile Script]: Compile main driver program and link to TF model in LLVM IR\n"
clang++ -DONNX_ML=1 image.c -o main.ll -O0 -S -emit-llvm -lonnx_proto -lprotobuf -I$ONNX_MLIR_SRC/include
llvm-link -o model.ll -S main.ll model.mlir.ll

printf "\n[Compile Script]: Generate model.exe \n"
/home/llvm-project/build/bin/llc -filetype=obj -o model.o model.ll -O0 --relocation-model=pic
clang++ -o model.exe model.o -L/home/LLTFI/LLTFI/build/bin/../runtime_lib -lllfi-rt -lpthread -L /Debug/lib -Wl,-rpath /home/LLTFI/LLTFI/build/bin/../runtime_lib -I$ONNX_MLIR_SRC/include -O0 -lonnx_proto -lprotobuf -lcruntime -ljson-c

printf "\n[Compile Script]: Compilation complete\n"
235 changes: 235 additions & 0 deletions sample_programs/shufflenetv2_10/image.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,235 @@
/*
* image.c - Sample program for Shufflenet
*
*
*
*/

#include <stdio.h>
#include <OnnxMlirRuntime.h>
#include "json-c/json.h"
#include <string.h>
#include <assert.h>
#include <stdlib.h>
#include "onnx/onnx_pb.h"
#include <fstream>
#include <vector>

#define NUM_INPUTS 1 // When using the same image.c file for a different model, specify the number of inputs here depending on the model

using namespace std;

extern "C" {
OMTensorList *run_main_graph(OMTensorList *);
}

void export_layer_output_to_json(OMTensorList *, char*, char*);

int main(int argc, char *argv[]) {

  // Input pointers needed for the model.
  char *inp[NUM_INPUTS];
  char *savefilename = (char *)"layeroutput.txt";
  char *output_seq = NULL;
  vector<void *> heapAllocs; // every malloc'ed buffer, freed before exit

  // Usage: model.exe <input.pb> x NUM_INPUTS <comma-separated layer id seq>
  unsigned int numArguments = NUM_INPUTS + 2;
  if (argc == (int)numArguments) {
    for (int i = 0; i < NUM_INPUTS; i++) {
      inp[i] = argv[i + 1];
    }
    output_seq = argv[NUM_INPUTS + 1];
  } else {
    // BUG FIX: previously execution fell through after printing this
    // message and dereferenced the uninitialized `inp` pointers below.
    printf("Must supply the path to an image file.\n");
    return 1;
  }

  OMTensor *img_list[NUM_INPUTS];
  for (int i = 0; i < NUM_INPUTS; i++) {
    // Deserialize the protobuf-encoded input tensor from disk.
    onnx::TensorProto input;
    std::ifstream in(inp[i], std::ios_base::binary);
    input.ParseFromIstream(&in);

    auto d = input.dims();

    // When using the same image.c file for a different model, change "float"
    // to the type of input data that the model expects.
    float *input_data{reinterpret_cast<float *>(const_cast<char *>(input.raw_data().data()))};
    long *in_shape = (long *)malloc(d.size() * sizeof(long));
    heapAllocs.push_back((void *)in_shape);

    int64_t totalSize = 1;
    std::cout << d.size() << std::endl;
    for (int j = 0; j < d.size(); j++) {
      in_shape[j] = d[j];
      totalSize *= d[j];
      std::cout << "in_shape[i] " << in_shape[j] << endl;
    }

    // Copy the tensor data to the heap so it outlives the TensorProto.
    float *heap_input_data = (float *)malloc(sizeof(float) * totalSize);
    memcpy(heap_input_data, input_data, totalSize * sizeof(float));
    heapAllocs.push_back((void *)heap_input_data);

    // When using the same image.c file for a different model, change
    // ONNX_TYPE depending on the input type.
    img_list[i] = omTensorCreate(heap_input_data, in_shape, d.size(), ONNX_TYPE_FLOAT);
  }

  // BUG FIX: the list size was hard-coded to 4 while img_list only holds
  // NUM_INPUTS tensors, so the runtime read past the end of the array.
  OMTensorList *graph_input = omTensorListCreate(img_list, NUM_INPUTS);

  // Call the compiled onnx model function.
  OMTensorList *outputList = run_main_graph(graph_input);

  // Export layer outputs to a JSON file.
  export_layer_output_to_json(outputList, savefilename, output_seq);

  for (void *ptr : heapAllocs) {
    free(ptr);
  }

  return 0;
}

// Split `a_str` in place on `a_delim` (strtok semantics: runs of the
// delimiter are treated as one, so empty tokens are skipped).
//
// Returns a malloc'ed array of strdup'ed tokens, terminated by a NULL
// sentinel so callers can also walk to the end.  `*len` receives the
// number of tokens.  The caller owns the array and every token.
//
// BUG FIX: the previous version pre-counted delimiters and asserted that
// strtok produced exactly that many tokens; any empty token (e.g. "1,,2"
// or a leading delimiter) tripped the assert and aborted.  It also never
// wrote the NULL terminator slot its comment promised.
char** str_split(char* a_str, const char a_delim, int* len)
{
    *len = 0;
    if (a_str == NULL)
        return NULL;

    /* Upper bound on token count: one more than the number of delimiters,
       plus one slot for the NULL sentinel. */
    size_t capacity = 2;
    for (const char* p = a_str; *p; ++p)
    {
        if (*p == a_delim)
            capacity++;
    }

    char** result = (char**)malloc(sizeof(char*) * capacity);
    if (result == NULL)
        return NULL;

    char delim[2];
    delim[0] = a_delim;
    delim[1] = 0;

    size_t idx = 0;
    for (char* token = strtok(a_str, delim); token != NULL; token = strtok(0, delim))
    {
        result[idx++] = strdup(token);
        *len = (*len) + 1;
    }
    result[idx] = 0; /* NULL sentinel marks the end of the list */

    return result;
}

// Convert an array of `count` numeric strings into a malloc'ed int array.
// The caller owns (and must free) the returned buffer.
int* convert_to_int(char** list, int count)
{
    int* values = (int*)malloc(count * sizeof(int));

    int idx = 0;
    while (idx < count)
    {
        values[idx] = atoi(list[idx]);
        ++idx;
    }

    return values;
}

// Turn this on for debugging the JSON creator.
// #define DEBUG_MSG(...) printf(__VA_ARGS__)
#define DEBUG_MSG(...)

// Export every output tensor of `outputList` to a JSON file at `savefile`.
//
// `expected_op_seq` is a comma-separated list of layer ids (e.g. "0,3,7");
// entry i labels output tensor i.  For each tensor we emit its layer id,
// rank, shape, element count and raw float data, keyed in the root JSON
// object by the tensor's index.
void export_layer_output_to_json(OMTensorList *outputList, char* savefile, char* expected_op_seq)
{
    // Parse the expected layer-id sequence.
    int count = 0;
    char** tokens = str_split(expected_op_seq, ',', &count);
    int* layer_seq = convert_to_int(tokens, count);

    // Global JSON object
    json_object* jobj = json_object_new_object();

    for (int64_t i = 0; i < omTensorListGetSize(outputList); i++) {

        // JSON object for this layer
        json_object* jobj_layer = json_object_new_object();

        DEBUG_MSG("Reading output of layer %lu\n", i);

        OMTensor *omt = omTensorListGetOmtByIndex(outputList, i);

        // Get properties of the tensor that you want to export to the JSON file
        int64_t rank = omTensorGetRank(omt);
        int64_t *shape = omTensorGetShape(omt);
        // Per-batch element count (leading dimension stripped).
        int64_t numElements = (int64_t) (omTensorGetNumElems(omt) / shape[0]);
        float *dataBuf = (float *)omTensorGetDataPtr(omt);
        int64_t bufferSize = omTensorGetBufferSize(omt);

        DEBUG_MSG("Rank: %lu \nNumber of elements: %lu \n", rank, numElements);
        DEBUG_MSG("Shape: ");
        for (int64_t j = 0; j < rank; j++){
            DEBUG_MSG("%lu, ", shape[j]);
        }
        DEBUG_MSG("\n");

        DEBUG_MSG("Buffer Size: %lu\n", bufferSize);
        (void)bufferSize; // only used by DEBUG_MSG; silence unused warning

        // BUG FIX: guard against a layer-id sequence shorter than the
        // tensor list - layer_seq[i] was read out of bounds in that case.
        int layer_id = (i < (int64_t)count) ? layer_seq[i] : -1;

        json_object *JLayerId = json_object_new_int(layer_id);
        json_object *JRank = json_object_new_int(rank);
        json_object *JNumElements = json_object_new_int(numElements);

        json_object_object_add(jobj_layer, "Layer Id", JLayerId);
        json_object_object_add(jobj_layer, "Rank", JRank);
        json_object_object_add(jobj_layer, "Number of Elements", JNumElements);

        json_object* JShape = json_object_new_array();
        for (int64_t j = 0; j < rank; j++) {
            json_object* temp = json_object_new_int(shape[j]);
            json_object_array_add(JShape, temp);
        }

        json_object* JData = json_object_new_array();
        for (int64_t j = 0; j < numElements; j++) {
            json_object* temp = json_object_new_double(dataBuf[j]);
            json_object_array_add(JData, temp);
        }

        json_object_object_add(jobj_layer, "Shape", JShape);
        json_object_object_add(jobj_layer, "Data", JData);

        // BUG FIX: a 5-byte buffer overflows once i >= 10000 and sprintf
        // offers no bounds checking; size the buffer for any int64 and
        // use snprintf.
        char str[24];
        snprintf(str, sizeof(str), "%ld", (long)i);
        json_object_object_add(jobj, str, jobj_layer);
    }

    // Free heap memory.
    // BUG FIX: the strdup'ed token strings leaked - only the array itself
    // was freed before.
    if (tokens) {
        for (int t = 0; t < count; t++) {
            free(tokens[t]);
        }
        free(tokens);
    }
    free(layer_seq);

    // json_object_get_string's buffer is owned by jobj, so write the file
    // BEFORE releasing the JSON tree.
    char* val = (char*) json_object_get_string(jobj);
    FILE* save = fopen(savefile, "w+");
    if (save != NULL) { // BUG FIX: fputs/fclose on NULL is undefined behavior
        fputs(val, save);
        fclose(save);
    }
    // BUG FIX: release the refcounted JSON tree (it leaked before).
    json_object_put(jobj);
}
30 changes: 30 additions & 0 deletions sample_programs/shufflenetv2_10/input.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
compileOption:
instSelMethod:
- customInstselector:
include:
- CustomTensorOperator
options:
- -layerNo=0
- -layerName=all

regSelMethod: regloc
regloc: dstreg

includeInjectionTrace:
- forward

tracingPropagation: False # trace dynamic instruction values.

tracingPropagationOption:
maxTrace: 250 # max number of instructions to trace during fault injection run
debugTrace: False
mlTrace: False # enable for tracing ML programs
generateCDFG: True

runOption:
- run:
numOfRuns: 1000
fi_type: bitflip
window_len_multiple_startindex: 1
window_len_multiple_endindex: 500
fi_max_multiple: 1
Binary file added sample_programs/shufflenetv2_10/input_0.pb
Binary file not shown.
Binary file added sample_programs/shufflenetv2_10/model.onnx
Binary file not shown.
Binary file added sample_programs/shufflenetv2_10/output_0.pb
Binary file not shown.
4 changes: 4 additions & 0 deletions sample_programs/shufflenetv2_10/runllfi.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Run the full LLTFI pipeline on the compiled model: instrument the IR,
# profile one golden run, then perform the fault-injection runs.
# Requires $LLFI_BUILD_ROOT and $ONNX_MLIR_BUILD; arguments to the exes are
# the input tensor (input_0.pb) and the layer output sequence ("0").
rm -rf llfi*
$LLFI_BUILD_ROOT/bin/instrument --readable -L $ONNX_MLIR_BUILD/Debug/lib -lcruntime -ljson-c -lprotobuf -lonnx_proto model.ll
$LLFI_BUILD_ROOT/bin/profile ./llfi/model-profiling.exe input_0.pb 0
$LLFI_BUILD_ROOT/bin/injectfault ./llfi/model-faultinjection.exe input_0.pb 0

0 comments on commit c3ce575

Please sign in to comment.