LLVM code generation #171

Closed
wants to merge 39 commits

Commits

a2592b1  Beginning of LLVM codegen (Jul 30, 2018)
888fc13  More LLVM implementation (Jul 30, 2018)
a134e80  More nodes in LLVM codegen (Jul 30, 2018)
e796cae  Formatting (Jul 30, 2018)
579bce3  Deal with new Literal interface; implement IfThenElse (Jul 30, 2018)
1475d95  A few more, plus some formatting fixes (Jul 30, 2018)
5e88be6  Implement sqrt (Aug 1, 2018)
e0df968  Possible implementation of switch-- but likely needs to be debugged o… (Aug 1, 2018)
6883896  Start implementing function compilation (Aug 1, 2018)
9f1e8fd  GetProperty and Allocate codegen, plus enable printing out/verifying … (Aug 2, 2018)
2ba614b  For loop codegen. Need to redo while loops. Blocked on AST not bein… (Aug 3, 2018)
2b40abc  Merge branch 'master' into llvm-codegen (Aug 6, 2018)
f692b1d  While loops, store, load implemented. (Aug 6, 2018)
a1d7f4d  Merge branch 'master' into llvm-codegen (Aug 6, 2018)
918685e  Merge remote-tracking branch 'origin/lower-rewrite' into llvm-codegen (Aug 6, 2018)
f586cca  Modify LLVM codegen for new IR refactoring (Aug 6, 2018)
4455221  Fix GEP usage in GetProperty (Aug 7, 2018)
9be4131  Minor target parsing bug fixes (Aug 7, 2018)
6fe42ba  Merge remote-tracking branch 'origin/lower-rewrite' into llvm-codegen (Aug 7, 2018)
8a695a2  Adding temporary JIT mode (Aug 7, 2018)
513078d  Checkpoint (Aug 10, 2018)
c5e731b  Minor fixes for LLVM 7 (Sep 24, 2018)
043ac16  Merge branch 'master' of https://github.com/tensor-compiler/taco into… (Sep 24, 2018)
3d3c9e3  Refactor FindVars to be available for LLVM codegen (Sep 24, 2018)
8acc0ab  Checkpoint: fix several bugs. Support realloc. All lower tests run. (Sep 27, 2018)
b890f34  Fix literal 0 in negation. (Sep 27, 2018)
b84bcd6  Minor (Sep 27, 2018)
deb7ac4  All tests pass (Sep 27, 2018)
d7534d6  Merge branch 'master' of https://github.com/tensor-compiler/taco into… (Sep 27, 2018)
d6e8944  Account for minor API change (Sep 27, 2018)
0f6b248  Cleanup Allocate codegen (Sep 27, 2018)
e4c84ad  Minor cleanup (Sep 27, 2018)
8797fe7  Add Print codegen (Sep 28, 2018)
4cb8540  Change all GEPs to explicitly state they are within bounds (Sep 28, 2018)
e23f9f9  Generate shims with LLVM (Sep 28, 2018)
d94fe49  Cleanup (Sep 28, 2018)
d2ebbf1  Add some basic optimization passes (Sep 28, 2018)
66e55bb  Minor cleanups. (Oct 1, 2018)
3878a96  Merge branch 'master' of https://github.com/tensor-compiler/taco into… (Oct 1, 2018)

Files changed

3 changes: 2 additions & 1 deletion include/taco/target.h
@@ -24,7 +24,7 @@ struct Target {
   Target(const std::string &s);

   Target(Arch a, OS o) : arch(a), os(o) {
-    taco_tassert(a == C99 && o != Windows && o != OSUnknown)
+    taco_tassert(o != Windows && o != OSUnknown)
       << "Unsupported target.";
   }

@@ -40,3 +40,4 @@ struct Target {
 } // namespace taco

 #endif
+

16 changes: 14 additions & 2 deletions src/CMakeLists.txt
@@ -6,6 +6,17 @@ else()
   message("-- Static library")
 endif()

+find_package(LLVM REQUIRED CONFIG)
+message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}")
+message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}")
+
+include_directories(${LLVM_INCLUDE_DIRS})
+add_definitions(${LLVM_DEFINITIONS})
+
+# Find the libraries that correspond to the LLVM components
+# that we wish to use
+llvm_map_components_to_libnames(llvm_libs core mcjit bitwriter linker X86 passes)
+
 set(TACO_SRC_DIRS . parser index_notation lower ir codegen storage error util)

 foreach(dir ${TACO_SRC_DIRS})
@@ -23,7 +34,8 @@ add_library(taco ${TACO_LIBRARY_TYPE} ${TACO_HEADERS} ${TACO_SOURCES})
 install(TARGETS taco DESTINATION lib)

 if (LINUX)
-  target_link_libraries(taco PRIVATE ${TACO_LIBRARIES} dl)
+  target_link_libraries(taco PRIVATE ${TACO_LIBRARIES} dl ${llvm_libs})
 else()
-  target_link_libraries(taco PRIVATE ${TACO_LIBRARIES})
+  target_link_libraries(taco PRIVATE ${TACO_LIBRARIES} ${llvm_libs})
 endif()
+
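
For readers unfamiliar with the components requested above (core, mcjit, bitwriter, linker, X86, passes): they pull in LLVM's IR construction, MCJIT execution, bitcode writing, module linking, x86 backend, and optimization-pass libraries. The following standalone sketch is not taco code; it is an illustration against the stock LLVM C++ API (an LLVM 7-era interface is assumed) of the kind of usage these libraries enable: build a tiny function with IRBuilder, verify it, and JIT-run it with MCJIT, linking against the same ${llvm_libs}.

// Standalone illustration only; everything below is the public LLVM C++ API,
// not code from this PR.
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/MCJIT.h"          // pulls in the MCJIT engine
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/raw_ostream.h"
#include <iostream>
#include <memory>

int main() {
  llvm::InitializeNativeTarget();            // requires the native target (X86 here)
  llvm::InitializeNativeTargetAsmPrinter();

  llvm::LLVMContext context;
  auto module = std::make_unique<llvm::Module>("demo", context);
  llvm::IRBuilder<> builder(context);

  // Emit: i32 add_one(i32 x) { return x + 1; }
  llvm::Type *i32 = builder.getInt32Ty();
  auto *fnType = llvm::FunctionType::get(i32, {i32}, /*isVarArg=*/false);
  auto *fn = llvm::Function::Create(fnType, llvm::Function::ExternalLinkage,
                                    "add_one", module.get());
  builder.SetInsertPoint(llvm::BasicBlock::Create(context, "entry", fn));
  llvm::Value *arg = &*fn->arg_begin();
  builder.CreateRet(builder.CreateAdd(arg, builder.getInt32(1)));
  llvm::verifyModule(*module, &llvm::errs()); // sanity-check the generated module

  // JIT-compile with MCJIT and call the generated code.
  llvm::ExecutionEngine *engine = llvm::EngineBuilder(std::move(module)).create();
  engine->finalizeObject();
  auto addOne = reinterpret_cast<int (*)(int)>(engine->getFunctionAddress("add_one"));
  std::cout << addOne(41) << "\n";            // prints 42
  return 0;
}

The mcjit and passes components line up with the "Adding temporary JIT mode" and "Add some basic optimization passes" commits above.
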
162 changes: 71 additions & 91 deletions src/codegen/codegen_c.cpp
@@ -49,98 +49,8 @@ const string cHeaders =
   "#endif\n"
   "#endif\n";

-// find variables for generating declarations
-// also only generates a single var for each GetProperty
-class FindVars : public IRVisitor {
-public:
-  map<Expr, string, ExprCompare> varMap;
-
-  // the variables for which we need to add declarations
-  map<Expr, string, ExprCompare> varDecls;
-
-  // this maps from tensor, property, mode, index to the unique var
-  map<tuple<Expr, TensorProperty, int, int>, string> canonicalPropertyVar;
-
-  // this is for convenience, recording just the properties unpacked
-  // from the output tensor so we can re-save them at the end
-  map<tuple<Expr, TensorProperty, int, int>, string> outputProperties;
-
-  // TODO: should replace this with an unordered set
-  vector<Expr> outputTensors;
-
-  // copy inputs and outputs into the map
-  FindVars(vector<Expr> inputs, vector<Expr> outputs) {
-    for (auto v: inputs) {
-      auto var = v.as<Var>();
-      taco_iassert(var) << "Inputs must be vars in codegen";
-      taco_iassert(varMap.count(var) == 0) <<
-          "Duplicate input found in codegen";
-      varMap[var] = var->name;
-    }
-    for (auto v: outputs) {
-      auto var = v.as<Var>();
-      taco_iassert(var) << "Outputs must be vars in codegen";
-      taco_iassert(varMap.count(var) == 0) <<
-          "Duplicate output found in codegen";
-
-      outputTensors.push_back(v);
-      varMap[var] = var->name;
-    }
-    inVarAssignLHSWithDecl = false;
-  }
-
-protected:
-  bool inVarAssignLHSWithDecl;
-  using IRVisitor::visit;
-
-  virtual void visit(const For *op) {
-    // Don't need to find/initialize loop bounds
-    inVarAssignLHSWithDecl = true;
-    op->var.accept(this);
-    op->start.accept(this);
-    op->end.accept(this);
-    op->increment.accept(this);
-    inVarAssignLHSWithDecl = false;
-
-    op->contents.accept(this);
-  }
-
-  virtual void visit(const Var *op) {
-    if (varMap.count(op) == 0) {
-      varMap[op] = CodeGen_C::genUniqueName(op->name);
-      if (!inVarAssignLHSWithDecl) {
-        varDecls[op] = varMap[op];
-      }
-    }
-  }
-
-  virtual void visit(const VarDecl *op) {
-    inVarAssignLHSWithDecl = true;
-    op->var.accept(this);
-    inVarAssignLHSWithDecl = false;
-    op->rhs.accept(this);
-  }
-
-  virtual void visit(const GetProperty *op) {
-    if (varMap.count(op) == 0) {
-      auto key =
-          tuple<Expr,TensorProperty,int,int>(op->tensor,op->property,
-                                             (size_t)op->mode,
-                                             (size_t)op->index);
-      if (canonicalPropertyVar.count(key) > 0) {
-        varMap[op] = canonicalPropertyVar[key];
-      } else {
-        auto unique_name = CodeGen_C::genUniqueName(op->name);
-        canonicalPropertyVar[key] = unique_name;
-        varMap[op] = unique_name;
-        varDecls[op] = unique_name;
-        if (util::contains(outputTensors, op->tensor)) {
-          outputProperties[key] = unique_name;
-        }
-      }
-    }
-  }
-};
-


 // helper to translate from taco type to C type
@@ -437,6 +347,76 @@ string printFuncName(const Function *func) {

 } // anonymous namespace

+
+// copy inputs and outputs into the map
+CodeGen_C::FindVars::FindVars(vector<Expr> inputs, vector<Expr> outputs) {
+  for (auto v: inputs) {
+    auto var = v.as<Var>();
+    taco_iassert(var) << "Inputs must be vars in codegen";
+    taco_iassert(varMap.count(var) == 0) <<
+        "Duplicate input found in codegen";
+    varMap[var] = var->name;
+  }
+  for (auto v: outputs) {
+    auto var = v.as<Var>();
+    taco_iassert(var) << "Outputs must be vars in codegen";
+    taco_iassert(varMap.count(var) == 0) <<
+        "Duplicate output found in codegen";
+
+    outputTensors.push_back(v);
+    varMap[var] = var->name;
+  }
+  inVarAssignLHSWithDecl = false;
+}
+
+
+void CodeGen_C::FindVars::visit(const For *op) {
+  // Don't need to find/initialize loop bounds
+  inVarAssignLHSWithDecl = true;
+  op->var.accept(this);
+  op->start.accept(this);
+  op->end.accept(this);
+  op->increment.accept(this);
+  inVarAssignLHSWithDecl = false;
+
+  op->contents.accept(this);
+}
+
+void CodeGen_C::FindVars::visit(const Var *op) {
+  if (varMap.count(op) == 0) {
+    varMap[op] = CodeGen_C::genUniqueName(op->name);
+    if (!inVarAssignLHSWithDecl) {
+      varDecls[op] = varMap[op];
+    }
+  }
+}
+
+void CodeGen_C::FindVars::visit(const VarDecl *op) {
+  inVarAssignLHSWithDecl = true;
+  op->var.accept(this);
+  inVarAssignLHSWithDecl = false;
+  op->rhs.accept(this);
+}
+
+void CodeGen_C::FindVars::visit(const GetProperty *op) {
+  if (varMap.count(op) == 0) {
+    auto key =
+        tuple<Expr,TensorProperty,int,int>(op->tensor,op->property,
+                                           (size_t)op->mode,
+                                           (size_t)op->index);
+    if (canonicalPropertyVar.count(key) > 0) {
+      varMap[op] = canonicalPropertyVar[key];
+    } else {
+      auto unique_name = CodeGen_C::genUniqueName(op->name);
+      canonicalPropertyVar[key] = unique_name;
+      varMap[op] = unique_name;
+      varDecls[op] = unique_name;
+      if (util::contains(outputTensors, op->tensor)) {
+        outputProperties[key] = unique_name;
+      }
+    }
+  }
+}
+
 string CodeGen_C::genUniqueName(string name) {
   stringstream os;
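
For orientation, here is a sketch (not part of the diff) of how a code generator drives FindVars. The surrounding context, a 'func' of taco's ir::Function type with inputs, outputs, and body, follows the IR used elsewhere in codegen_c.cpp, but this particular call site is illustrative rather than quoted from the PR.

// Illustrative driver for the visitor above; 'func' is assumed context,
// not code from this PR.
CodeGen_C::FindVars varFinder(func->inputs, func->outputs);
func->body.accept(&varFinder);

// varFinder.varMap now maps every Var and GetProperty to a unique C name;
// varFinder.varDecls holds only those that still need a declaration emitted
// up front (loop variables and VarDecl'd variables are skipped, since they
// are declared where they are assigned).
for (const auto &decl : varFinder.varDecls) {
  // decl.first is the IR Expr, decl.second the generated identifier.
}
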
32 changes: 32 additions & 0 deletions src/codegen/codegen_c.h
@@ -31,6 +31,36 @@ class CodeGen_C : public IRPrinter {
   /// a mix of taco_tensor_t* and scalars into a function call
   static void generateShim(const Stmt& func, std::stringstream &stream);

+  // find variables for generating declarations
+  // also only generates a single var for each GetProperty
+  class FindVars : public IRVisitor {
+  public:
+    FindVars(std::vector<Expr> inputs, std::vector<Expr> outputs);
+
+    std::map<Expr, std::string, ExprCompare> varMap;
+
+    // the variables for which we need to add declarations
+    std::map<Expr, std::string, ExprCompare> varDecls;
+
+    // this maps from tensor, property, mode, index to the unique var
+    std::map<std::tuple<Expr, TensorProperty, int, int>, std::string> canonicalPropertyVar;
+
+    // this is for convenience, recording just the properties unpacked
+    // from the output tensor so we can re-save them at the end
+    std::map<std::tuple<Expr, TensorProperty, int, int>, std::string> outputProperties;
+
+    // TODO: should replace this with an unordered set
+    std::vector<Expr> outputTensors;
+  protected:
+    bool inVarAssignLHSWithDecl;
+    using IRVisitor::visit;
+
+    virtual void visit(const For *op);
+    virtual void visit(const Var *op);
+    virtual void visit(const VarDecl *op);
+    virtual void visit(const GetProperty *op);
+  };
+
 protected:
   using IRPrinter::visit;
   void visit(const Function*);
@@ -47,6 +77,8 @@ class CodeGen_C : public IRPrinter {
   std::ostream &out;

   OutputKind outputKind;
+
+
 };

 } // namespace ir
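
Hoisting FindVars out of codegen_c.cpp's anonymous namespace and into the CodeGen_C class is what lets other backends reuse it (see the "Refactor FindVars to be available for LLVM codegen" commit). A hypothetical sketch of that reuse follows; CodeGen_LLVM and its compile method are assumed names, not the PR's actual LLVM backend.

// Hypothetical consumer; only CodeGen_C::FindVars and its public members
// come from this PR, the rest is assumed for illustration.
class CodeGen_LLVM : public IRVisitor {
public:
  void compile(const Function *func) {
    // Reuse the C backend's analysis to discover every variable and
    // tensor property that needs storage before emitting any LLVM IR.
    CodeGen_C::FindVars varFinder(func->inputs, func->outputs);
    func->body.accept(&varFinder);
    for (const auto &decl : varFinder.varDecls) {
      // e.g. create one alloca (or cache an llvm::Value*) per entry,
      // keyed by the same unique name the C backend would have used.
    }
  }
};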