[CODEGEN/LLVM] Initial support for codegen LLVM. #49

Merged · 2 commits · Feb 22, 2017
2 changes: 1 addition & 1 deletion HalideIR
Submodule HalideIR updated from e68ae6 to 1a11a6
42 changes: 26 additions & 16 deletions Makefile
@@ -1,8 +1,8 @@
ifndef config
ifneq ("$(wildcard ./config.mk)","")
config = config.mk
config ?= config.mk
else
config = make/config.mk
config ?= make/config.mk
endif
endif

@@ -19,31 +19,24 @@ SRC = $(wildcard src/*.cc src/*/*.cc src/*/*/*.cc)
ALL_OBJ = $(patsubst src/%.cc, build/%.o, $(SRC))
ALL_DEP = $(ALL_OBJ) $(LIB_HALIDE_IR)

ifneq ($(USE_CUDA_PATH), NONE)
NVCC=$(USE_CUDA_PATH)/bin/nvcc
endif

export LDFLAGS = -pthread -lm
export CFLAGS = -std=c++11 -Wall -O2\
-Iinclude -Idmlc-core/include -IHalideIR/src -fPIC
export FRAMEWORKS=

ifneq ($(ADD_CFLAGS), NONE)
CFLAGS += $(ADD_CFLAGS)
endif
export CFLAGS = -std=c++11 -Wall -O2 -fno-rtti\
-Iinclude -Idmlc-core/include -IHalideIR/src -fPIC -DDMLC_ENABLE_RTTI=0

ifneq ($(ADD_LDFLAGS), NONE)
LDFLAGS += $(ADD_LDFLAGS)
ifdef CUDA_PATH
NVCC=$(CUDA_PATH)/bin/nvcc
CFLAGS += -I$(CUDA_PATH)/include
LDFLAGS += -L$(CUDA_PATH)/lib64
endif


ifeq ($(USE_CUDA), 1)
CFLAGS += -DTVM_CUDA_RUNTIME=1
LDFLAGS += -lcuda -lcudart -lnvrtc
else
CFLAGS += -DTVM_CUDA_RUNTIME=0
endif

FRAMEWORKS=

ifeq ($(USE_OPENCL), 1)
CFLAGS += -DTVM_OPENCL_RUNTIME=1
@@ -57,6 +50,23 @@ else
CFLAGS += -DTVM_OPENCL_RUNTIME=0
endif

# llvm configuration
LLVM_CONFIG=llvm-config

ifeq ($(USE_LLVM), 1)
LLVM_VERSION=$(shell $(LLVM_CONFIG) --version| cut -b 1,3)
LLVM_INCLUDE=$(filter -I%, $(shell $(LLVM_CONFIG) --cxxflags))
LDFLAGS += $(shell $(LLVM_CONFIG) --ldflags --libs --system-libs)
CFLAGS += $(LLVM_INCLUDE) -DTVM_LLVM_VERSION=$(LLVM_VERSION)
endif

ifdef $(ADD_CFLAGS)
CFLAGS += $(ADD_CFLAGS)
endif

ifdef $(ADD_LDFLAGS)
LDFLAGS += $(ADD_LDFLAGS)
endif

include tests/cpp/unittest.mk

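
The LLVM block above derives a two-digit version number from `llvm-config --version` (for example 40 for LLVM 4.0) and passes it to the compiler as `TVM_LLVM_VERSION`, alongside the include and link flags reported by `llvm-config`. A minimal sketch of how a source file could key off that macro (illustrative only, not part of this diff):

```cpp
// Illustrative guard: the macro is only defined when USE_LLVM = 1, and its
// value encodes the major/minor version picked up by the Makefile (e.g. 40).
#ifdef TVM_LLVM_VERSION
#if TVM_LLVM_VERSION >= 40
#include <llvm/IR/LLVMContext.h>
#endif
#endif
```
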
2 changes: 1 addition & 1 deletion include/tvm/buffer.h
@@ -90,7 +90,7 @@ class BufferNode : public Node {
Type dtype);

static constexpr const char* _type_key = "Buffer";
TVM_DECLARE_NODE_TYPE_INFO(BufferNode);
TVM_DECLARE_NODE_TYPE_INFO(BufferNode, Node);
};

inline const BufferNode* Buffer::operator->() const {
7 changes: 7 additions & 0 deletions include/tvm/codegen.h
@@ -31,6 +31,13 @@ PackedFunc BuildStackVM(
LoweredFunc func,
const std::unordered_map<LoweredFunc, PackedFunc>& device_funcs);

/*!
* \brief Build an LLVM function; this is still in beta.
* \param func The LoweredFunc to be built.
* \return A packed function representing the func.
*/
PackedFunc BuildLLVM(LoweredFunc func);

/*!
* \brief Build a CUDA function with NVRTC
*
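
The new BuildLLVM entry point mirrors BuildStackVM above: it takes a single LoweredFunc and returns a PackedFunc that can be called directly. A minimal sketch of a caller, assuming the usual schedule/lowering flow has already produced `f` and that the declarations live in the `tvm::codegen` / `tvm::runtime` namespaces as elsewhere in the tree (illustrative, not part of this diff):

```cpp
#include <tvm/codegen.h>

// Sketch: JIT-compile a lowered function through the new LLVM backend.
tvm::runtime::PackedFunc CompileWithLLVM(tvm::LoweredFunc f) {
  return tvm::codegen::BuildLLVM(f);
}
```
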
3 changes: 1 addition & 2 deletions include/tvm/expr.h
@@ -36,7 +36,6 @@ using Halide::Internal::make_zero;
using Halide::Internal::as_const_int;
using Halide::Internal::as_const_uint;


inline Type TVMType2Type(TVMType t) {
return Type(static_cast<halide_type_code_t>(t.code), t.bits, t.lanes);
}
@@ -182,7 +181,7 @@ class IterVarNode : public Node {
static IterVar make(Range dom, Var var, std::string thread_tag);

static constexpr const char* _type_key = "IterVar";
TVM_DECLARE_NODE_TYPE_INFO(IterVarNode);
TVM_DECLARE_NODE_TYPE_INFO(IterVarNode, Node);
};

// inline implementations
2 changes: 2 additions & 0 deletions include/tvm/ir.h
@@ -200,6 +200,8 @@ using Halide::Internal::Realize;
using Halide::Internal::Block;
using Halide::Internal::IfThenElse;
using Halide::Internal::Evaluate;
// ir functions
using Halide::Internal::is_const_power_of_two_integer;

} // namespace ir
} // namespace tvm
2 changes: 1 addition & 1 deletion include/tvm/lowered_func.h
@@ -92,7 +92,7 @@ class LoweredFuncNode : public FunctionBaseNode {
}

static constexpr const char* _type_key = "LoweredFunc";
TVM_DECLARE_NODE_TYPE_INFO(LoweredFuncNode);
TVM_DECLARE_NODE_TYPE_INFO(LoweredFuncNode, Node);
};

// Implementations of inline functions
6 changes: 3 additions & 3 deletions include/tvm/operation.h
@@ -39,7 +39,7 @@ class PlaceholderOpNode : public OperationNode {
Type dtype);

static constexpr const char* _type_key = "PlaceholderOp";
TVM_DECLARE_NODE_TYPE_INFO(PlaceholderOpNode);
TVM_DECLARE_NODE_TYPE_INFO(PlaceholderOpNode, OperationNode);
};

/*!
@@ -74,7 +74,7 @@ class ComputeOpNode : public OperationNode {
Expr body);

static constexpr const char* _type_key = "ComputeOp";
TVM_DECLARE_NODE_TYPE_INFO(ComputeOpNode);
TVM_DECLARE_NODE_TYPE_INFO(ComputeOpNode, OperationNode);
};

/*!
@@ -123,7 +123,7 @@ class ScanOpNode : public OperationNode {
Array<Tensor> state_placeholder);

static constexpr const char* _type_key = "ScanOp";
TVM_DECLARE_NODE_TYPE_INFO(ScanOpNode);
TVM_DECLARE_NODE_TYPE_INFO(ScanOpNode, OperationNode);
};


2 changes: 1 addition & 1 deletion include/tvm/packed_func_ext.h
@@ -33,7 +33,7 @@ struct NodeTypeChecker {
// It can be turned off, but will make non strict checking.
// TODO(tqchen) possibly find alternative to turn of RTTI
using ContainerType = typename T::ContainerType;
return (dynamic_cast<ContainerType*>(sptr) != nullptr);
return sptr->derived_from<ContainerType>();
}
static inline void PrintName(std::ostringstream& os) { // NOLINT(*)
using ContainerType = typename T::ContainerType;
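
Replacing dynamic_cast with derived_from&lt;ContainerType&gt;() is what lets the Makefile build with -fno-rtti: every node type now declares its parent via TVM_DECLARE_NODE_TYPE_INFO / TVM_DECLARE_BASE_NODE_INFO, so the hierarchy can be checked without runtime type information. A rough sketch of the general idea (names are illustrative and do not reflect TVM's actual macro internals):

```cpp
// Rough sketch of RTTI-free hierarchy checks via statically declared parents.
struct TypeInfo {
  const char* key;         // e.g. "Operation"
  const TypeInfo* parent;  // nullptr at the root of the hierarchy
};

struct NodeBase {
  virtual ~NodeBase() = default;
  virtual const TypeInfo* type_info() const = 0;  // provided by a declare macro

  template <typename T>
  bool derived_from() const {
    // Walk the declared parent chain instead of calling dynamic_cast.
    for (const TypeInfo* t = type_info(); t != nullptr; t = t->parent) {
      if (t == &T::kTypeInfo) return true;
    }
    return false;
  }
};
```
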
11 changes: 9 additions & 2 deletions include/tvm/runtime/c_runtime_api.h
@@ -152,6 +152,13 @@ typedef void* TVMRetValueHandle;
/*! \brief the array handle */
typedef TVMArray* TVMArrayHandle;

/*!
* \brief Used for implementing C API function.
* Set last error message before return.
* \param msg The error message to be set.
*/
TVM_DLL void TVMAPISetLastError(const char* msg);

/*!
* \brief return str message of the last error
* all function in this file will return 0 when success
@@ -287,10 +294,10 @@ TVM_DLL int TVMCFuncSetReturn(TVMRetValueHandle ret,
* \param num_args Number of arguments.
* \param ret The return value handle.
* \param resource_handle The additional resource handle from the front-end.
*
* \return 0 if success, -1 if failure happens, set error via TVMAPISetLastError.
* \sa TVMCFuncSetReturn
*/
typedef void (*TVMPackedCFunc)(
typedef int (*TVMPackedCFunc)(
TVMValue* args,
int* type_codes,
int num_args,
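
Two related changes land in this header: TVMAPISetLastError is now exported for C API implementers, and TVMPackedCFunc returns int so a callback can report failure (0 on success, -1 on failure, as the updated comment states). A sketch of a callback written against the new convention; the exact types of the trailing ret/resource_handle parameters are assumed here, and the body is illustrative:

```cpp
#include <tvm/runtime/c_runtime_api.h>

// Sketch: return 0 on success; on failure, record a message and return -1.
int MyPackedCFunc(TVMValue* args, int* type_codes, int num_args,
                  TVMRetValueHandle ret, void* resource_handle) {
  if (num_args < 1) {
    TVMAPISetLastError("MyPackedCFunc: expected at least one argument");
    return -1;
  }
  // ... do the real work here, optionally setting a return value with
  // TVMCFuncSetReturn(ret, ...) as documented above ...
  return 0;
}
```
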
15 changes: 9 additions & 6 deletions include/tvm/schedule.h
@@ -331,7 +331,7 @@ class StageNode : public Node {
}

static constexpr const char* _type_key = "Stage";
TVM_DECLARE_NODE_TYPE_INFO(StageNode);
TVM_DECLARE_NODE_TYPE_INFO(StageNode, Node);
};

/*! \brief node container for schedule */
@@ -354,7 +354,7 @@ class ScheduleNode : public Node {
}

static constexpr const char* _type_key = "Schedule";
TVM_DECLARE_NODE_TYPE_INFO(ScheduleNode);
TVM_DECLARE_NODE_TYPE_INFO(ScheduleNode, Node);
};

/*! \brief node container for IterVar attr */
@@ -368,11 +368,14 @@ class IterVarAttrNode : public Node {
}

static constexpr const char* _type_key = "IterVarAttr";
TVM_DECLARE_NODE_TYPE_INFO(IterVarAttrNode);
TVM_DECLARE_NODE_TYPE_INFO(IterVarAttrNode, Node);
};

/*! \brief base node of iteration var */
class IterVarRelationNode : public Node {
public:
static constexpr const char* _type_key = "IterVarRelation";
TVM_DECLARE_BASE_NODE_INFO(IterVarRelationNode, Node);
};

/*!
@@ -402,7 +405,7 @@ class SplitNode : public IterVarRelationNode {
IterVar inner, Expr factor);

static constexpr const char* _type_key = "Split";
TVM_DECLARE_NODE_TYPE_INFO(SplitNode);
TVM_DECLARE_NODE_TYPE_INFO(SplitNode, IterVarRelationNode);
};

/*!
@@ -427,7 +430,7 @@ class FuseNode : public IterVarRelationNode {
IterVar outer, IterVar inner, IterVar fused);

static constexpr const char* _type_key = "Fuse";
TVM_DECLARE_NODE_TYPE_INFO(FuseNode);
TVM_DECLARE_NODE_TYPE_INFO(FuseNode, IterVarRelationNode);
};

/*!
@@ -450,7 +453,7 @@ class RebaseNode : public IterVarRelationNode {
static IterVarRelation make(IterVar parent, IterVar rebased);

static constexpr const char* _type_key = "Rebase";
TVM_DECLARE_NODE_TYPE_INFO(RebaseNode);
TVM_DECLARE_NODE_TYPE_INFO(RebaseNode, IterVarRelationNode);
};


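
The schedule header shows the new two-level pattern end to end: the abstract IterVarRelationNode registers itself with TVM_DECLARE_BASE_NODE_INFO, and each concrete relation (Split, Fuse, Rebase) names that base in TVM_DECLARE_NODE_TYPE_INFO. A hypothetical additional relation would follow the same shape (illustration only, not part of the diff):

```cpp
// Hypothetical node, shown only to illustrate the declaration pattern.
class MyRelationNode : public IterVarRelationNode {
 public:
  static constexpr const char* _type_key = "MyRelation";
  TVM_DECLARE_NODE_TYPE_INFO(MyRelationNode, IterVarRelationNode);
};
```
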
6 changes: 3 additions & 3 deletions include/tvm/tensor.h
@@ -153,7 +153,7 @@ class TensorNode : public Node {
int value_index);

static constexpr const char* _type_key = "Tensor";
TVM_DECLARE_NODE_TYPE_INFO(TensorNode);
TVM_DECLARE_NODE_TYPE_INFO(TensorNode, Node);
};

/*!
@@ -167,8 +167,6 @@ class OperationNode : public FunctionBaseNode {
const std::string& func_name() const final {
return name;
}
/*! \return number of outputs of this op */
virtual int num_outputs() const = 0;
/*! \return the list of iteration variable at root */
virtual Array<IterVar> root_iter_vars() const = 0;
/*! \return type of i-th output */
@@ -177,6 +175,8 @@ virtual Array<Expr> output_shape(size_t i) const = 0;
virtual Array<Expr> output_shape(size_t i) const = 0;

static constexpr const char* _type_key = "Operation";

TVM_DECLARE_BASE_NODE_INFO(OperationNode, Node);
};

// Implementations of inline functions
13 changes: 7 additions & 6 deletions make/config.mk
@@ -40,10 +40,11 @@ USE_CUDA = 1
# whether use OpenCL during compile
USE_OPENCL = 0

# add the path to CUDA library to link and compile flag
# if you have already add them to environment variable, leave it as NONE
# USE_CUDA_PATH = /usr/local/cuda
USE_CUDA_PATH = NONE
# whether build with LLVM support
# This requires llvm-config to be in your PATH
# Requires LLVM version >= 4.0
USE_LLVM = 0

# whether use cuda runtime compiling for writing kernels in native language (i.e. Python)
USE_NVRTC = 0
# add the path to CUDA library to link and compile flag
# if you have already add them to environment variable.
# CUDA_PATH = /usr/local/cuda
1 change: 1 addition & 0 deletions python/tvm/_ctypes/_function.py
@@ -56,6 +56,7 @@ def cfun(args, type_codes, num_args, ret, _):
check_call(_LIB.TVMCFuncSetReturn(ret, values[0], ctypes.c_int(tcodes[0])))
_ = temp_args
_ = rv
return 0

handle = FunctionHandle()
f = TVMPackedCFunc(cfun)
2 changes: 1 addition & 1 deletion python/tvm/_ctypes/_types.py
@@ -96,7 +96,7 @@ class TVMByteArray(ctypes.Structure):


TVMPackedCFunc = ctypes.CFUNCTYPE(
None,
ctypes.c_int,
ctypes.POINTER(TVMValue),
ctypes.POINTER(ctypes.c_int),
ctypes.c_int,
1 change: 1 addition & 0 deletions src/README.md
@@ -6,3 +6,4 @@
- arithmetic Arithmetic expression and set simplification
- pass The optimization pass on the IR structure
- runtime Minimum runtime related codes.
- codegen The code generator
5 changes: 5 additions & 0 deletions src/api/api_codegen.cc
@@ -37,6 +37,11 @@ TVM_REGISTER_API(_codegen_BuildStackVM)
std::unordered_map<LoweredFunc, PackedFunc>());
});

TVM_REGISTER_API(_codegen_BuildLLVM)
.set_body([](TVMArgs args, TVMRetValue *ret) {
*ret = BuildLLVM(args[0]);
});

TVM_REGISTER_API(_codegen_BuildNVRTC)
.set_body([](TVMArgs args, TVMRetValue *ret) {
*ret = BuildNVRTC(args[0], args[1]);
4 changes: 3 additions & 1 deletion src/arithmetic/int_set.h
@@ -20,7 +20,7 @@ enum SignType {
};

// internal node container of int set.
class IntSetNode;
struct IntSetNode;

/*!
* \brief Integer set class, represent a set of integers in one dimension.
@@ -104,6 +104,8 @@ class IntSet : public NodeRef {
* \brief Base class of all IntSet containers.
*/
struct IntSetNode : public Node {
static constexpr const char* _type_key = "IntSet";
TVM_DECLARE_BASE_NODE_INFO(IntSetNode, Node);
};

using ExprIntSetMap = std::unordered_map<Expr, IntSet,
4 changes: 2 additions & 2 deletions src/arithmetic/int_set_internal.h
@@ -35,7 +35,7 @@ struct IntervalSet : public IntSetNode {
}

static constexpr const char* _type_key = "IntervalSet";
TVM_DECLARE_NODE_TYPE_INFO(IntervalSet);
TVM_DECLARE_NODE_TYPE_INFO(IntervalSet, IntSetNode);
};

/*!
@@ -51,7 +51,7 @@ struct StrideSet : public IntSetNode {
Array<Expr> strides;

static constexpr const char* _type_key = "StrideSet";
TVM_DECLARE_NODE_TYPE_INFO(StrideSet);
TVM_DECLARE_NODE_TYPE_INFO(StrideSet, IntSetNode);
};

} // namespace arith
5 changes: 0 additions & 5 deletions src/codegen/codegen_stack_vm.cc
@@ -272,9 +272,6 @@ inline void PushBinary(StackVM::OpCode op_int64,
}
}




inline void PushCast(Type dst,
Type src,
CodeGenStackVM* p) {
@@ -496,7 +493,5 @@ TVM_STATIC_IR_FUNCTOR(CodeGenStackVM, vtable)
.set_dispatch<Call>([](const Call *op, CodeGenStackVM* p) {
p->Push_(op);
});


} // namespace codegen
} // namespace tvm