diff --git a/.github/workflows/issue_comment.yml b/.github/workflows/issue_comment.yml new file mode 100644 index 0000000000000..b5c80040fc9ff --- /dev/null +++ b/.github/workflows/issue_comment.yml @@ -0,0 +1,19 @@ +name: Sync issue comments to JIRA + +# This workflow will be triggered when new issue comment is created (including PR comments) +on: issue_comment + +jobs: + sync_issue_comments_to_jira: + name: Sync Issue Comments to Jira + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: Sync issue comments to JIRA + uses: espressif/github-actions/sync_issues_to_jira@master + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + JIRA_PASS: ${{ secrets.JIRA_PASS }} + JIRA_PROJECT: LLVM + JIRA_URL: ${{ secrets.JIRA_URL }} + JIRA_USER: ${{ secrets.JIRA_USER }} diff --git a/.github/workflows/new_issues.yml b/.github/workflows/new_issues.yml new file mode 100644 index 0000000000000..a6602d1c7aa1c --- /dev/null +++ b/.github/workflows/new_issues.yml @@ -0,0 +1,19 @@ +name: Sync issues to Jira + +# This workflow will be triggered when a new issue is opened +on: issues + +jobs: + sync_issues_to_jira: + name: Sync issues to Jira + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: Sync GitHub issues to Jira project + uses: espressif/github-actions/sync_issues_to_jira@master + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + JIRA_PASS: ${{ secrets.JIRA_PASS }} + JIRA_PROJECT: LLVM + JIRA_URL: ${{ secrets.JIRA_URL }} + JIRA_USER: ${{ secrets.JIRA_USER }} diff --git a/.github/workflows/new_prs.yml b/.github/workflows/new_prs.yml new file mode 100644 index 0000000000000..199d58ef87b3f --- /dev/null +++ b/.github/workflows/new_prs.yml @@ -0,0 +1,24 @@ +name: Sync remain PRs to Jira + +# This workflow will be triggered every hour, to sync remaining PRs (i.e. 
PRs with zero comment) to Jira project +# Note that, PRs can also get synced when new PR comment is created +on: + schedule: + - cron: "0 * * * *" + +jobs: + sync_prs_to_jira: + name: Sync PRs to Jira + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: Sync PRs to Jira project + uses: espressif/github-actions/sync_issues_to_jira@master + with: + cron_job: true + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + JIRA_PASS: ${{ secrets.JIRA_PASS }} + JIRA_PROJECT: LLVM + JIRA_URL: ${{ secrets.JIRA_URL }} + JIRA_USER: ${{ secrets.JIRA_USER }} diff --git a/clang/include/clang/Basic/TargetInfo.h b/clang/include/clang/Basic/TargetInfo.h index 36109f1ccc095..813bc294c7174 100644 --- a/clang/include/clang/Basic/TargetInfo.h +++ b/clang/include/clang/Basic/TargetInfo.h @@ -258,7 +258,10 @@ class TargetInfo : public virtual TransferrableTargetInfo, // void *__overflow_arg_area; // void *__reg_save_area; // } va_list[1]; - SystemZBuiltinVaList + SystemZBuiltinVaList, + + // Tensilica Xtensa + XtensaABIBuiltinVaList }; protected: diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp index f6919938d5aeb..6113134b9397a 100644 --- a/clang/lib/AST/ASTContext.cpp +++ b/clang/lib/AST/ASTContext.cpp @@ -7637,6 +7637,51 @@ CreateSystemZBuiltinVaListDecl(const ASTContext *Context) { return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); } +static TypedefDecl * +CreateXtensaABIBuiltinVaListDecl(const ASTContext *Context) { + // typedef struct __va_list_tag { + RecordDecl *VaListTagDecl; + + VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); + VaListTagDecl->startDefinition(); + + const size_t NumFields = 3; + QualType FieldTypes[NumFields]; + const char *FieldNames[NumFields]; + + // int* __va_stk; + FieldTypes[0] = Context->getPointerType(Context->IntTy); + FieldNames[0] = "__va_stk"; + + // int* __va_reg; + FieldTypes[1] = Context->getPointerType(Context->IntTy); + FieldNames[1] = "__va_reg"; + + // int __va_ndx; + FieldTypes[2] = Context->IntTy; + FieldNames[2] = "__va_ndx"; + + // Create fields + for (unsigned i = 0; i < NumFields; ++i) { + FieldDecl *Field = FieldDecl::Create( + *Context, VaListTagDecl, SourceLocation(), SourceLocation(), + &Context->Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, + /*BitWidth=*/nullptr, + /*Mutable=*/false, ICIS_NoInit); + Field->setAccess(AS_public); + VaListTagDecl->addDecl(Field); + } + VaListTagDecl->completeDefinition(); + Context->VaListTagDecl = VaListTagDecl; + QualType VaListTagType = Context->getRecordType(VaListTagDecl); + + // } __va_list_tag; + TypedefDecl *VaListTagTypedefDecl = + Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list"); + + return VaListTagTypedefDecl; +} + static TypedefDecl *CreateVaListDecl(const ASTContext *Context, TargetInfo::BuiltinVaListKind Kind) { switch (Kind) { @@ -7656,6 +7701,8 @@ static TypedefDecl *CreateVaListDecl(const ASTContext *Context, return CreateAAPCSABIBuiltinVaListDecl(Context); case TargetInfo::SystemZBuiltinVaList: return CreateSystemZBuiltinVaListDecl(Context); + case TargetInfo::XtensaABIBuiltinVaList: + return CreateXtensaABIBuiltinVaListDecl(Context); } llvm_unreachable("Unhandled __builtin_va_list type kind"); diff --git a/clang/lib/Basic/CMakeLists.txt b/clang/lib/Basic/CMakeLists.txt index be739c70468e5..7405a66b810d1 100644 --- a/clang/lib/Basic/CMakeLists.txt +++ b/clang/lib/Basic/CMakeLists.txt @@ -86,6 +86,7 @@ add_clang_library(clangBasic Targets/WebAssembly.cpp Targets/X86.cpp Targets/XCore.cpp + 
Targets/Xtensa.cpp TokenKinds.cpp Version.cpp Warnings.cpp diff --git a/clang/lib/Basic/Targets.cpp b/clang/lib/Basic/Targets.cpp index 63a64ed2931a8..10a9b8601cecf 100644 --- a/clang/lib/Basic/Targets.cpp +++ b/clang/lib/Basic/Targets.cpp @@ -36,6 +36,7 @@ #include "Targets/WebAssembly.h" #include "Targets/X86.h" #include "Targets/XCore.h" +#include "Targets/Xtensa.h" #include "clang/Basic/Diagnostic.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/Triple.h" @@ -601,6 +602,9 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple, return new LinuxTargetInfo(Triple, Opts); case llvm::Triple::renderscript64: return new LinuxTargetInfo(Triple, Opts); + + case llvm::Triple::xtensa: + return new XtensaTargetInfo(Triple, Opts); } } } // namespace targets diff --git a/clang/lib/Basic/Targets/Xtensa.cpp b/clang/lib/Basic/Targets/Xtensa.cpp new file mode 100644 index 0000000000000..da2dff7076d24 --- /dev/null +++ b/clang/lib/Basic/Targets/Xtensa.cpp @@ -0,0 +1,30 @@ +//===--- Xtensa.cpp - Implement Xtensa target feature support ---------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements Xtensa TargetInfo objects. +// +//===----------------------------------------------------------------------===// + +#include "Xtensa.h" +#include "clang/Basic/Builtins.h" +#include "clang/Basic/MacroBuilder.h" +#include "clang/Basic/TargetBuiltins.h" + +using namespace clang; +using namespace clang::targets; + +void XtensaTargetInfo::getTargetDefines(const LangOptions &Opts, + MacroBuilder &Builder) const { + Builder.defineMacro("__Xtensa__"); + Builder.defineMacro("__xtensa__"); + Builder.defineMacro("__XTENSA__"); + Builder.defineMacro("__XTENSA_WINDOWED_ABI__"); + Builder.defineMacro("__XTENSA_EL__"); +} + diff --git a/clang/lib/Basic/Targets/Xtensa.h b/clang/lib/Basic/Targets/Xtensa.h new file mode 100644 index 0000000000000..aaa24aecee924 --- /dev/null +++ b/clang/lib/Basic/Targets/Xtensa.h @@ -0,0 +1,116 @@ +//===--- Xtensa.h - Declare Xtensa target feature support ---------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares Xtensa TargetInfo objects. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_XTENSA_H +#define LLVM_CLANG_LIB_BASIC_TARGETS_XTENSA_H + +#include "clang/Basic/TargetInfo.h" +#include "clang/Basic/TargetOptions.h" +#include "llvm/ADT/StringSwitch.h" +#include "llvm/ADT/Triple.h" +#include "llvm/Support/Compiler.h" + +#include "clang/Basic/Builtins.h" +#include "clang/Basic/MacroBuilder.h" +#include "clang/Basic/TargetBuiltins.h" + +namespace clang { +namespace targets { + +class LLVM_LIBRARY_VISIBILITY XtensaTargetInfo : public TargetInfo { + std::string CPU; + +public: + XtensaTargetInfo(const llvm::Triple &Triple, const TargetOptions &) + : TargetInfo(Triple) { + BigEndian = false; + NoAsmVariants = true; + LongLongAlign = 32; + SuitableAlign = 32; + DoubleAlign = LongDoubleAlign = 32; + SizeType = UnsignedInt; + PtrDiffType = SignedInt; + IntPtrType = SignedInt; + WCharType = UnsignedChar; + WIntType = UnsignedInt; + UseZeroLengthBitfieldAlignment = true; + MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32; + resetDataLayout("e-m:e-p:32:32-i8:8:32-i16:16:32-i64:64-n32"); + } + + void getTargetDefines(const LangOptions &Opts, + MacroBuilder &Builder) const override; + + ArrayRef getTargetBuiltins() const override { + + return None; + } + + BuiltinVaListKind getBuiltinVaListKind() const override { + + return TargetInfo::XtensaABIBuiltinVaList; + } + + const char *getClobbers() const override { + return ""; + } + + ArrayRef getGCCRegNames() const override { + static const char * const GCCRegNames[] = { + //General register name + "a0", "sp", "a1", "a2", "a3", "a4", "a5", "a6", "a7", + "a8", "a9", "a10", "a11", "a12", "a13", "a14", "a15", + //Special register name + "sar" + }; + return llvm::makeArrayRef(GCCRegNames); + } + + ArrayRef getGCCRegAliases() const override { + return None; + } + + bool validateAsmConstraint(const char *&Name, + TargetInfo::ConstraintInfo &Info) const override { + switch (*Name) { + default: + return false; + case 'a': + Info.setAllowsRegister(); + return true; + } + return false; + } + + int getEHDataRegisterNumber(unsigned RegNo) const override { + return (RegNo < 2)? 
RegNo : -1; + } + + bool isValidCPUName(StringRef Name) const override { + return llvm::StringSwitch(Name) + .Case("esp32", true) + .Case("esp8266", true) + .Case("esp32-s2", true) + .Case("generic", true) + .Default(false); + } + + bool setCPU(const std::string &Name) override { + CPU = Name; + return isValidCPUName(Name); + } + +}; +} // namespace targets +} // namespace clang +#endif // LLVM_CLANG_LIB_BASIC_TARGETS_XTENSA_H diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp index f2696a33cfbf4..39994da6d598a 100644 --- a/clang/lib/CodeGen/TargetInfo.cpp +++ b/clang/lib/CodeGen/TargetInfo.cpp @@ -9683,6 +9683,186 @@ class RISCVTargetCodeGenInfo : public TargetCodeGenInfo { }; } // namespace +//===----------------------------------------------------------------------===// +// Xtensa ABI Implementation +//===----------------------------------------------------------------------===// + +namespace { +class XtensaABIInfo : public DefaultABIInfo { +private: + static const int NumArgGPRs = 6; + static const int MAX_ARG_IN_REGS_SIZE = 4 * 32; + static const int MAX_ARG_DIRECT_SIZE = MAX_ARG_IN_REGS_SIZE; + static const int MAX_RET_IN_REGS_SIZE = 2 * 32; + +public: + XtensaABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} + + // DefaultABIInfo's classifyReturnType and classifyArgumentType are + // non-virtual, but computeInfo is virtual, so we overload it. + void computeInfo(CGFunctionInfo &FI) const override; + + ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, + int &ArgGPRsLeft) const; + ABIArgInfo classifyReturnType(QualType RetTy) const; + + Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, + QualType Ty) const override; + + ABIArgInfo extendType(QualType Ty) const; +}; +} // end anonymous namespace + +void XtensaABIInfo::computeInfo(CGFunctionInfo &FI) const { + QualType RetTy = FI.getReturnType(); + if (!getCXXABI().classifyReturnType(FI)) + FI.getReturnInfo() = classifyReturnType(RetTy); + // IsRetIndirect is true if classifyArgumentType indicated the value should + // be passed indirect or if the type size is greater than 2*32. + // is passed direct in LLVM IR, relying on the backend lowering code to + // rewrite the argument list and pass indirectly on RV32. + bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect || + getContext().getTypeSize(RetTy) > MAX_RET_IN_REGS_SIZE; + // We must track the number of GPRs used in order to conform to the Xtensa + // ABI, as integer scalars passed in registers should have signext/zeroext + // when promoted, but are anyext if passed on the stack. As GPR usage is + // different for variadic arguments, we must also track whether we are + // examining a vararg or not. + int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs; + int NumFixedArgs = FI.getNumRequiredArgs(); + int ArgNum = 0; + for (auto &ArgInfo : FI.arguments()) { + bool IsFixed = ArgNum < NumFixedArgs; + ArgInfo.info = classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft); + ArgNum++; + } +} + +ABIArgInfo XtensaABIInfo::classifyArgumentType(QualType Ty, bool IsFixed, + int &ArgGPRsLeft) const { + assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow"); + + Ty = useFirstFieldIfTransparentUnion(Ty); + // Structures with either a non-trivial destructor or a non-trivial + // copy constructor are always passed indirectly. 
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { + if (ArgGPRsLeft) + ArgGPRsLeft -= 1; + return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA == + CGCXXABI::RAA_DirectInMemory); + } + // Ignore empty structs/unions. + if (isEmptyRecord(getContext(), Ty, true)) + return ABIArgInfo::getIgnore(); + uint64_t Size = getContext().getTypeSize(Ty); + uint64_t NeededAlign = getContext().getTypeAlign(Ty); + bool MustUseStack = false; + // Determine the number of GPRs needed to pass the current argument + // according to the ABI. 2*XLen-aligned varargs are passed in "aligned" + // register pairs, so may consume 3 registers. + int NeededArgGPRs = 1; + if (!IsFixed && NeededAlign == 2 * 32) + NeededArgGPRs = 2 + (ArgGPRsLeft % 2); + else if (Size > 32 && Size <= MAX_ARG_IN_REGS_SIZE) + NeededArgGPRs = (Size + 31) / 32; + if (NeededArgGPRs > ArgGPRsLeft) { + MustUseStack = true; + NeededArgGPRs = ArgGPRsLeft; + } + ArgGPRsLeft -= NeededArgGPRs; + if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) { + // Treat an enum type as its underlying type. + if (const EnumType *EnumTy = Ty->getAs()) + Ty = EnumTy->getDecl()->getIntegerType(); + // All integral types are promoted to XLen width, unless passed on the + // stack. + if (Size < 32 && Ty->isIntegralOrEnumerationType() && !MustUseStack) { + return extendType(Ty); + } + return ABIArgInfo::getDirect(); + } + + // Aggregates which are <= 4*32 will be passed in registers if possible, + // so coerce to integers. + if (Size <= MAX_ARG_IN_REGS_SIZE) { + unsigned Alignment = getContext().getTypeAlign(Ty); + // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is + // required, and a 2-element XLen array if only XLen alignment is + // required. + if (Size <= 32) { + return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 32)); + } + + else if (Alignment == 2 * 32) { + return ABIArgInfo::getDirect( + llvm::IntegerType::get(getVMContext(), 2 * 32)); + } else { + return ABIArgInfo::getDirect(llvm::ArrayType::get( + llvm::IntegerType::get(getVMContext(), 32), (Size + 31) / 32)); + } + } +#undef MAX_STRUCT_IN_REGS_SIZE + return getNaturalAlignIndirect(Ty, /*ByVal=*/false); +} + +ABIArgInfo XtensaABIInfo::classifyReturnType(QualType RetTy) const { + if (RetTy->isVoidType()) + return ABIArgInfo::getIgnore(); + int ArgGPRsLeft = 2; + // The rules for return and argument types are the same, so defer to + // classifyArgumentType. + return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft); +} + +Address XtensaABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, + QualType Ty) const { + CharUnits SlotSize = CharUnits::fromQuantity(32 / 8); + // Empty records are ignored for parameter passing purposes. + if (isEmptyRecord(getContext(), Ty, true)) { + // We try to return some dummy value which will be + // removed by backend + + auto TypeInfo = getContext().getTypeInfoInChars(Ty); + return emitVoidPtrVAArg(CGF, VAListAddr, Ty, false, TypeInfo, SlotSize, + /*AllowHigherAlign=*/false); + } + + std::pair SizeAndAlign = + getContext().getTypeInfoInChars(Ty); + // Arguments bigger than MAX_STRUCT_DIRECT_SIZE indirectly. 
+ CharUnits DirectSize = CharUnits::fromQuantity(MAX_ARG_DIRECT_SIZE / 8); + bool IsIndirect = SizeAndAlign.first > DirectSize; + + if (IsIndirect) { + auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty); + CharUnits TyAlignForABI = TyInfo.second; + + llvm::Type *BaseTy = + llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty)); + llvm::Value *Addr = + CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy); + return Address(Addr, TyAlignForABI); + } else { + Address Temp = CGF.CreateMemTemp(Ty, "varet"); + llvm::Value *Val = + CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty)); + CGF.Builder.CreateStore(Val, Temp); + return Temp; + } +} + +ABIArgInfo XtensaABIInfo::extendType(QualType Ty) const { + return ABIArgInfo::getExtend(Ty); +} + +namespace { +class XtensaTargetCodeGenInfo : public TargetCodeGenInfo { +public: + XtensaTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) + : TargetCodeGenInfo(new XtensaABIInfo(CGT)) {} +}; +} // namespace + //===----------------------------------------------------------------------===// // Driver code //===----------------------------------------------------------------------===// @@ -9869,6 +10049,8 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { case llvm::Triple::spir: case llvm::Triple::spir64: return SetCGInfo(new SPIRTargetCodeGenInfo(Types)); + case llvm::Triple::xtensa: + return SetCGInfo(new XtensaTargetCodeGenInfo(Types)); } } diff --git a/clang/lib/Driver/CMakeLists.txt b/clang/lib/Driver/CMakeLists.txt index d90c0ff436071..5f223f1df3fad 100644 --- a/clang/lib/Driver/CMakeLists.txt +++ b/clang/lib/Driver/CMakeLists.txt @@ -66,6 +66,7 @@ add_clang_library(clangDriver ToolChains/WebAssembly.cpp ToolChains/XCore.cpp ToolChains/PPCLinux.cpp + ToolChains/Xtensa.cpp Types.cpp XRayArgs.cpp diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp index 94d50d713c879..508bd0716f604 100644 --- a/clang/lib/Driver/Driver.cpp +++ b/clang/lib/Driver/Driver.cpp @@ -44,6 +44,7 @@ #include "ToolChains/TCE.h" #include "ToolChains/WebAssembly.h" #include "ToolChains/XCore.h" +#include "ToolChains/Xtensa.h" #include "clang/Basic/Version.h" #include "clang/Config/config.h" #include "clang/Driver/Action.h" @@ -4746,6 +4747,10 @@ const ToolChain &Driver::getToolChain(const ArgList &Args, case llvm::Triple::riscv64: TC = std::make_unique(*this, Target, Args); break; + case llvm::Triple::xtensa: + TC = + std::make_unique(*this, Target, Args); + break; default: if (Target.getVendor() == llvm::Triple::Myriad) TC = std::make_unique(*this, Target, diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp index 3d2fa8534080e..dc4cfcff28adc 100644 --- a/clang/lib/Driver/ToolChains/CommonArgs.cpp +++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp @@ -309,6 +309,7 @@ std::string tools::getCPUName(const ArgList &Args, const llvm::Triple &T, case llvm::Triple::sparc: case llvm::Triple::sparcel: case llvm::Triple::sparcv9: + case llvm::Triple::xtensa: if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) return A->getValue(); return ""; diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp index 71ed529b79413..a7ad0e17db81f 100644 --- a/clang/lib/Driver/ToolChains/Gnu.cpp +++ b/clang/lib/Driver/ToolChains/Gnu.cpp @@ -2036,6 +2036,8 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes( "s390x-linux-gnu", "s390x-unknown-linux-gnu", "s390x-ibm-linux-gnu", "s390x-suse-linux", "s390x-redhat-linux"}; + static const char *const 
XtensaLibDirs[] = {"/lib"}; + static const char *const XtensaTriples[] = {"xtensa-unknown-elf"}; using std::begin; using std::end; @@ -2283,6 +2285,10 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes( LibDirs.append(begin(SystemZLibDirs), end(SystemZLibDirs)); TripleAliases.append(begin(SystemZTriples), end(SystemZTriples)); break; + case llvm::Triple::xtensa: + LibDirs.append(begin(XtensaLibDirs), end(XtensaLibDirs)); + TripleAliases.append(begin(XtensaTriples), end(XtensaTriples)); + break; default: // By default, just rely on the standard lib directories and the original // triple. @@ -2588,6 +2594,7 @@ bool Generic_GCC::IsIntegratedAssemblerDefault() const { case llvm::Triple::mips64: case llvm::Triple::mips64el: case llvm::Triple::msp430: + // case llvm::Triple::xtensa: return true; case llvm::Triple::sparc: case llvm::Triple::sparcel: diff --git a/clang/lib/Driver/ToolChains/Xtensa.cpp b/clang/lib/Driver/ToolChains/Xtensa.cpp new file mode 100644 index 0000000000000..d5986463f3733 --- /dev/null +++ b/clang/lib/Driver/ToolChains/Xtensa.cpp @@ -0,0 +1,253 @@ +//===--- Xtensa.cpp - Xtensa ToolChain Implementations ---------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===---------------------------------------------------------------------===// + +#include "Xtensa.h" +#include "CommonArgs.h" +#include "InputInfo.h" +#include "clang/Basic/Cuda.h" +#include "clang/Config/config.h" +#include "clang/Driver/Compilation.h" +#include "clang/Driver/Distro.h" +#include "clang/Driver/Driver.h" +#include "clang/Driver/DriverDiagnostic.h" +#include "clang/Driver/Options.h" +#include "llvm/Option/ArgList.h" +#include "llvm/Support/Path.h" +#include "llvm/Support/VirtualFileSystem.h" +#include + +using namespace clang::driver; +using namespace clang::driver::tools; +using namespace clang::driver::toolchains; +using namespace clang; +using namespace llvm::opt; + +XtensaGCCToolchainDetector::XtensaGCCToolchainDetector( + const Driver &D, const llvm::Triple &HostTriple, + const llvm::opt::ArgList &Args) { + std::string InstalledDir; + InstalledDir = D.getInstalledDir(); + StringRef CPUName = XtensaToolChain::GetTargetCPUVersion(Args); + std::string Dir; + std::string ToolchainName; + std::string ToolchainDir; + + if (CPUName.equals("esp32")) + ToolchainName = "xtensa-esp32-elf"; + else if (CPUName.equals("esp8266")) + ToolchainName = "xtensa-lx106-elf"; + + // ToolchainDir = InstalledDir + "/../" + ToolchainName; + ToolchainDir = InstalledDir + "/.."; + Dir = ToolchainDir + "/lib/gcc/" + ToolchainName + "/"; + GCCLibAndIncVersion = ""; + + if (D.getVFS().exists(Dir)) { + std::error_code EC; + for (llvm::vfs::directory_iterator LI = D.getVFS().dir_begin(Dir, EC), LE; + !EC && LI != LE; LI = LI.increment(EC)) { + StringRef VersionText = llvm::sys::path::filename(LI->path()); + auto GCCVersion = Generic_GCC::GCCVersion::Parse(VersionText); + if (GCCVersion.Major == -1) + continue; + GCCLibAndIncVersion = GCCVersion.Text; + } + if (GCCLibAndIncVersion == "") + llvm_unreachable("Unexpected Xtensa GCC toolchain version"); + + } else { + // Unable to find Xtensa GCC toolchain; + GCCToolchainName = ""; + return; + } + GCCToolchainDir = ToolchainDir; + GCCToolchainName = ToolchainName; +} + +/// Xtensa Toolchain +XtensaToolChain::XtensaToolChain(const Driver &D, const llvm::Triple &Triple, + const ArgList &Args) + : Generic_ELF(D, Triple, Args), 
XtensaGCCToolchain(D, getTriple(), Args) { + for (auto *A : Args) { + std::string Str = A->getAsString(Args); + if (!Str.compare("-mlongcalls")) + A->claim(); + if (!Str.compare("-fno-tree-switch-conversion")) + A->claim(); + + // Currently don't use integrated assembler for assembler input files + if ((IsIntegratedAsm) && (Str.length() > 2)) { + std::string ExtSubStr = Str.substr(Str.length() - 2); + if (!ExtSubStr.compare(".s")) + IsIntegratedAsm = false; + if (!ExtSubStr.compare(".S")) + IsIntegratedAsm = false; + } + } + + // Currently don't use integrated assembler for assembler input files + if (IsIntegratedAsm) { + if (Args.getLastArgValue(options::OPT_x).equals("assembler")) + IsIntegratedAsm = false; + + if (Args.getLastArgValue(options::OPT_x).equals("assembler-with-cpp")) + IsIntegratedAsm = false; + } +} + +Tool *XtensaToolChain::buildLinker() const { + return new tools::Xtensa::Linker(*this); +} + +Tool *XtensaToolChain::buildAssembler() const { + return new tools::Xtensa::Assembler(*this); +} + +void XtensaToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs, + ArgStringList &CC1Args) const { + if (DriverArgs.hasArg(clang::driver::options::OPT_nostdinc) || + DriverArgs.hasArg(options::OPT_nostdlibinc)) + return; + + if (!XtensaGCCToolchain.IsValid()) + return; + + std::string Path1 = + XtensaGCCToolchain.GCCToolchainDir + "/lib/clang/10.0.0/include"; + std::string Path2 = XtensaGCCToolchain.GCCToolchainDir + "/lib/gcc/" + + XtensaGCCToolchain.GCCToolchainName + "/" + + XtensaGCCToolchain.GCCLibAndIncVersion + "/include"; + std::string Path3 = XtensaGCCToolchain.GCCToolchainDir + "/lib/gcc/" + + XtensaGCCToolchain.GCCToolchainName + "/" + + XtensaGCCToolchain.GCCLibAndIncVersion + "/include-fixed"; + std::string Path4 = XtensaGCCToolchain.GCCToolchainDir + "/" + + XtensaGCCToolchain.GCCToolchainName + "/sys-include"; + std::string Path5 = XtensaGCCToolchain.GCCToolchainDir + "/" + + XtensaGCCToolchain.GCCToolchainName + "/include"; + const StringRef Paths[] = {Path1, Path2, Path3, Path4, Path5}; + addSystemIncludes(DriverArgs, CC1Args, Paths); +} + +void XtensaToolChain::addLibStdCxxIncludePaths( + const llvm::opt::ArgList &DriverArgs, + llvm::opt::ArgStringList &CC1Args) const { + if (!XtensaGCCToolchain.IsValid()) + return; + + std::string BaseDir = XtensaGCCToolchain.GCCToolchainDir + "/" + + XtensaGCCToolchain.GCCToolchainName + "/include/c++/" + + XtensaGCCToolchain.GCCLibAndIncVersion; + std::string TargetDir = BaseDir + "/" + XtensaGCCToolchain.GCCToolchainName; + addLibStdCXXIncludePaths(BaseDir, "", "", "", "", "", DriverArgs, CC1Args); + addLibStdCXXIncludePaths(TargetDir, "", "", "", "", "", DriverArgs, CC1Args); + TargetDir = BaseDir + "/backward"; + addLibStdCXXIncludePaths(TargetDir, "", "", "", "", "", DriverArgs, CC1Args); +} + +ToolChain::CXXStdlibType +XtensaToolChain::GetCXXStdlibType(const ArgList &Args) const { + Arg *A = Args.getLastArg(options::OPT_stdlib_EQ); + if (!A) + return ToolChain::CST_Libstdcxx; + + StringRef Value = A->getValue(); + if (Value != "libstdc++") + getDriver().Diag(diag::err_drv_invalid_stdlib_name) << A->getAsString(Args); + + return ToolChain::CST_Libstdcxx; +} + +const StringRef XtensaToolChain::GetTargetCPUVersion(const ArgList &Args) { + if (Arg *A = Args.getLastArg(clang::driver::options::OPT_mcpu_EQ)) { + StringRef CPUName = A->getValue(); + return CPUName; + } + return "esp32"; +} + +void tools::Xtensa::Assembler::ConstructJob(Compilation &C, const JobAction &JA, + const InputInfo &Output, + const InputInfoList 
&Inputs, + const ArgList &Args, + const char *LinkingOutput) const { + const auto &TC = + static_cast(getToolChain()); + + if (!TC.XtensaGCCToolchain.IsValid()) + llvm_unreachable("Unable to find Xtensa GCC assembler"); + + claimNoWarnArgs(Args); + ArgStringList CmdArgs; + + CmdArgs.push_back("-o"); + CmdArgs.push_back(Output.getFilename()); + + CmdArgs.push_back("-c"); + + if (Args.hasArg(options::OPT_v)) + CmdArgs.push_back("-v"); + + if (Arg *A = Args.getLastArg(options::OPT_g_Group)) + if (!A->getOption().matches(options::OPT_g0)) + CmdArgs.push_back("-g"); + + if (Args.hasFlag(options::OPT_fverbose_asm, options::OPT_fno_verbose_asm, + false)) + CmdArgs.push_back("-fverbose-asm"); + + Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler); + + for (const auto &II : Inputs) + CmdArgs.push_back(II.getFilename()); + + const char *Asm = + Args.MakeArgString(getToolChain().getDriver().Dir + "/" + + TC.XtensaGCCToolchain.GCCToolchainName + "-as"); + C.addCommand(std::make_unique(JA, *this, Asm, CmdArgs, Inputs)); +} + +void Xtensa::Linker::ConstructJob(Compilation &C, const JobAction &JA, + const InputInfo &Output, + const InputInfoList &Inputs, + const ArgList &Args, + const char *LinkingOutput) const { + const auto &TC = + static_cast(getToolChain()); + + if (!TC.XtensaGCCToolchain.IsValid()) + llvm_unreachable("Unable to find Xtensa GCC linker"); + + std::string Linker = getToolChain().getDriver().Dir + "/" + + TC.XtensaGCCToolchain.GCCToolchainName + "-ld"; + ArgStringList CmdArgs; + + Args.AddAllArgs(CmdArgs, + {options::OPT_T_Group, options::OPT_e, options::OPT_s, + options::OPT_L, options::OPT_t, options::OPT_u_Group}); + + AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA); + + std::string Libs = TC.XtensaGCCToolchain.GCCToolchainDir + "/lib/gcc/" + + TC.XtensaGCCToolchain.GCCToolchainName + "/" + + TC.XtensaGCCToolchain.GCCLibAndIncVersion + "/"; + CmdArgs.push_back("-L"); + CmdArgs.push_back(Args.MakeArgString(Libs)); + + Libs = TC.XtensaGCCToolchain.GCCToolchainDir + "/" + + TC.XtensaGCCToolchain.GCCToolchainName + "/lib/"; + CmdArgs.push_back("-L"); + CmdArgs.push_back(Args.MakeArgString(Libs)); + + CmdArgs.push_back("-v"); + + CmdArgs.push_back("-o"); + CmdArgs.push_back(Output.getFilename()); + C.addCommand(std::make_unique(JA, *this, Args.MakeArgString(Linker), + CmdArgs, Inputs)); +} diff --git a/clang/lib/Driver/ToolChains/Xtensa.h b/clang/lib/Driver/ToolChains/Xtensa.h new file mode 100644 index 0000000000000..caed5cf4ea010 --- /dev/null +++ b/clang/lib/Driver/ToolChains/Xtensa.h @@ -0,0 +1,92 @@ +//===--- Xtensa.h - Xtensa Tool and ToolChain Implementations ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_Xtensa_H +#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_Xtensa_H + +#include "Gnu.h" +#include "InputInfo.h" +#include "clang/Driver/Tool.h" +#include "clang/Driver/ToolChain.h" + +namespace clang { +namespace driver { +namespace toolchains { + +class XtensaGCCToolchainDetector { +public: + std::string GCCLibAndIncVersion; + std::string GCCToolchainName; + std::string GCCToolchainDir; + + XtensaGCCToolchainDetector(const Driver &D, const llvm::Triple &HostTriple, + const llvm::opt::ArgList &Args); + + bool IsValid() const { return GCCToolchainName != ""; } +}; + +class LLVM_LIBRARY_VISIBILITY XtensaToolChain : public Generic_ELF { +protected: + Tool *buildLinker() const override; + Tool *buildAssembler() const override; + +public: + XtensaToolChain(const Driver &D, const llvm::Triple &Triple, + const llvm::opt::ArgList &Args); + void + AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs, + llvm::opt::ArgStringList &CC1Args) const override; + void + addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs, + llvm::opt::ArgStringList &CC1Args) const override; + CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override; + bool IsIntegratedAssemblerDefault() const override { + return (IsIntegratedAsm || (XtensaGCCToolchain.GCCToolchainName == "")); + } + + static const StringRef GetTargetCPUVersion(const llvm::opt::ArgList &Args); + + XtensaGCCToolchainDetector XtensaGCCToolchain; + bool IsIntegratedAsm = true; +}; + +} // end namespace toolchains + +namespace tools { +namespace Xtensa { +class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool { +public: + Linker(const ToolChain &TC) + : GnuTool("Xtensa::Linker", "xtensa-esp32-elf-ld", TC) {} + bool hasIntegratedCPP() const override { return false; } + bool isLinkJob() const override { return true; } + void ConstructJob(Compilation &C, const JobAction &JA, + const InputInfo &Output, const InputInfoList &Inputs, + const llvm::opt::ArgList &TCArgs, + const char *LinkingOutput) const override; +}; + +class LLVM_LIBRARY_VISIBILITY Assembler : public Tool { +public: + Assembler(const ToolChain &TC) + : Tool("Xtensa::Assembler", "xtensa-esp32-elf-as", TC) {} + + bool hasIntegratedCPP() const override { return false; } + void ConstructJob(Compilation &C, const JobAction &JA, + const InputInfo &Output, const InputInfoList &Inputs, + const llvm::opt::ArgList &TCArgs, + const char *LinkingOutput) const override; +}; + +} // end namespace Xtensa +} // end namespace tools +} // end namespace driver +} // end namespace clang + +#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_Xtensa_H diff --git a/llvm/include/llvm/ADT/Triple.h b/llvm/include/llvm/ADT/Triple.h index edeb31efab801..48e582386364a 100644 --- a/llvm/include/llvm/ADT/Triple.h +++ b/llvm/include/llvm/ADT/Triple.h @@ -78,6 +78,7 @@ class Triple { x86, // X86: i[3-9]86 x86_64, // X86-64: amd64, x86_64 xcore, // XCore: xcore + xtensa, // Tensilica Xtensa nvptx, // NVPTX: 32-bit nvptx64, // NVPTX: 64-bit le32, // le32: generic little-endian 32-bit CPU (PNaCl) diff --git a/llvm/include/llvm/BinaryFormat/ELF.h b/llvm/include/llvm/BinaryFormat/ELF.h index 46edfb6260be1..4ecc311a2e63e 100644 --- a/llvm/include/llvm/BinaryFormat/ELF.h +++ b/llvm/include/llvm/BinaryFormat/ELF.h @@ -767,6 +767,21 @@ enum { #include "ELFRelocs/MSP430.def" }; +// Xtensa specific e_flags +enum : unsigned { + /* Four-bit Xtensa machine type field. 
*/ + EF_XTENSA_MACH = 0x0000000f, + /* Various CPU types. */ + E_XTENSA_MACH = 0x00000000, + EF_XTENSA_XT_INSN = 0x00000100, + EF_XTENSA_XT_LIT = 0x00000200, +}; + +// ELF Relocation types for Xtensa +enum { +#include "ELFRelocs/Xtensa.def" +}; + #undef ELF_RELOC // Section header. diff --git a/llvm/include/llvm/BinaryFormat/ELFRelocs/Xtensa.def b/llvm/include/llvm/BinaryFormat/ELFRelocs/Xtensa.def new file mode 100644 index 0000000000000..e0b2776b57a40 --- /dev/null +++ b/llvm/include/llvm/BinaryFormat/ELFRelocs/Xtensa.def @@ -0,0 +1,59 @@ +#ifndef ELF_RELOC +#error "ELF_RELOC must be defined" +#endif + +ELF_RELOC (R_XTENSA_NONE, 0) +ELF_RELOC (R_XTENSA_32, 1) +ELF_RELOC (R_XTENSA_RTLD, 2) +ELF_RELOC (R_XTENSA_GLOB_DAT, 3) +ELF_RELOC (R_XTENSA_JMP_SLOT, 4) +ELF_RELOC (R_XTENSA_RELATIVE, 5) +ELF_RELOC (R_XTENSA_PLT, 6) +ELF_RELOC (R_XTENSA_OP0, 8) +ELF_RELOC (R_XTENSA_OP1, 9) +ELF_RELOC (R_XTENSA_OP2, 10) +ELF_RELOC (R_XTENSA_ASM_EXPAND, 11) +ELF_RELOC (R_XTENSA_ASM_SIMPLIFY, 12) +ELF_RELOC (R_XTENSA_32_PCREL, 14) +ELF_RELOC (R_XTENSA_GNU_VTINHERIT, 15) +ELF_RELOC (R_XTENSA_GNU_VTENTRY, 16) +ELF_RELOC (R_XTENSA_DIFF8, 17) +ELF_RELOC (R_XTENSA_DIFF16, 18) +ELF_RELOC (R_XTENSA_DIFF32, 19) +ELF_RELOC (R_XTENSA_SLOT0_OP, 20) +ELF_RELOC (R_XTENSA_SLOT1_OP, 21) +ELF_RELOC (R_XTENSA_SLOT2_OP, 22) +ELF_RELOC (R_XTENSA_SLOT3_OP, 23) +ELF_RELOC (R_XTENSA_SLOT4_OP, 24) +ELF_RELOC (R_XTENSA_SLOT5_OP, 25) +ELF_RELOC (R_XTENSA_SLOT6_OP, 26) +ELF_RELOC (R_XTENSA_SLOT7_OP, 27) +ELF_RELOC (R_XTENSA_SLOT8_OP, 28) +ELF_RELOC (R_XTENSA_SLOT9_OP, 29) +ELF_RELOC (R_XTENSA_SLOT10_OP, 30) +ELF_RELOC (R_XTENSA_SLOT11_OP, 31) +ELF_RELOC (R_XTENSA_SLOT12_OP, 32) +ELF_RELOC (R_XTENSA_SLOT13_OP, 33) +ELF_RELOC (R_XTENSA_SLOT14_OP, 34) +ELF_RELOC (R_XTENSA_SLOT0_ALT, 35) +ELF_RELOC (R_XTENSA_SLOT1_ALT, 36) +ELF_RELOC (R_XTENSA_SLOT2_ALT, 37) +ELF_RELOC (R_XTENSA_SLOT3_ALT, 38) +ELF_RELOC (R_XTENSA_SLOT4_ALT, 39) +ELF_RELOC (R_XTENSA_SLOT5_ALT, 40) +ELF_RELOC (R_XTENSA_SLOT6_ALT, 41) +ELF_RELOC (R_XTENSA_SLOT7_ALT, 42) +ELF_RELOC (R_XTENSA_SLOT8_ALT, 43) +ELF_RELOC (R_XTENSA_SLOT9_ALT, 44) +ELF_RELOC (R_XTENSA_SLOT10_ALT, 45) +ELF_RELOC (R_XTENSA_SLOT11_ALT, 46) +ELF_RELOC (R_XTENSA_SLOT12_ALT, 47) +ELF_RELOC (R_XTENSA_SLOT13_ALT, 48) +ELF_RELOC (R_XTENSA_SLOT14_ALT, 49) +ELF_RELOC (R_XTENSA_TLSDESC_FN, 50) +ELF_RELOC (R_XTENSA_TLSDESC_ARG, 51) +ELF_RELOC (R_XTENSA_TLS_DTPOFF, 52) +ELF_RELOC (R_XTENSA_TLS_TPOFF, 53) +ELF_RELOC (R_XTENSA_TLS_FUNC, 54) +ELF_RELOC (R_XTENSA_TLS_ARG, 55) +ELF_RELOC (R_XTENSA_TLS_CALL, 56) diff --git a/llvm/lib/MC/MCObjectFileInfo.cpp b/llvm/lib/MC/MCObjectFileInfo.cpp index 70c0409ece7a9..daccbdc2c5681 100644 --- a/llvm/lib/MC/MCObjectFileInfo.cpp +++ b/llvm/lib/MC/MCObjectFileInfo.cpp @@ -322,6 +322,9 @@ void MCObjectFileInfo::initELFMCObjectFileInfo(const Triple &T, bool Large) { FDECFIEncoding = PositionIndependent ? 
dwarf::DW_EH_PE_pcrel : dwarf::DW_EH_PE_absptr; break; + case Triple::xtensa: + FDECFIEncoding = dwarf::DW_EH_PE_sdata4; + break; default: FDECFIEncoding = dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4; break; diff --git a/llvm/lib/Object/ELF.cpp b/llvm/lib/Object/ELF.cpp index d491288579df6..3c470eff5067e 100644 --- a/llvm/lib/Object/ELF.cpp +++ b/llvm/lib/Object/ELF.cpp @@ -145,6 +145,13 @@ StringRef llvm::object::getELFRelocationTypeName(uint32_t Machine, break; } break; + case ELF::EM_XTENSA: + switch (Type) { +#include "llvm/BinaryFormat/ELFRelocs/Xtensa.def" + default: + break; + } + break; default: break; } diff --git a/llvm/lib/ObjectYAML/ELFYAML.cpp b/llvm/lib/ObjectYAML/ELFYAML.cpp index 94b1df1043916..685181dacde64 100644 --- a/llvm/lib/ObjectYAML/ELFYAML.cpp +++ b/llvm/lib/ObjectYAML/ELFYAML.cpp @@ -425,6 +425,11 @@ void ScalarBitSetTraits::bitset(IO &IO, break; case ELF::EM_X86_64: break; + case ELF::EM_XTENSA: + BCase(EF_XTENSA_XT_INSN); + BCaseMask(E_XTENSA_MACH, EF_XTENSA_MACH); + BCase(EF_XTENSA_XT_LIT); + break; default: llvm_unreachable("Unsupported architecture"); } @@ -651,6 +656,9 @@ void ScalarEnumerationTraits::enumeration( case ELF::EM_BPF: #include "llvm/BinaryFormat/ELFRelocs/BPF.def" break; + case ELF::EM_XTENSA: +#include "llvm/BinaryFormat/ELFRelocs/Xtensa.def" + break; case ELF::EM_PPC64: #include "llvm/BinaryFormat/ELFRelocs/PowerPC64.def" break; diff --git a/llvm/lib/Support/Triple.cpp b/llvm/lib/Support/Triple.cpp index d419463e6a5e6..c427b9c042e14 100644 --- a/llvm/lib/Support/Triple.cpp +++ b/llvm/lib/Support/Triple.cpp @@ -70,6 +70,7 @@ StringRef Triple::getArchTypeName(ArchType Kind) { case wasm64: return "wasm64"; case renderscript32: return "renderscript32"; case renderscript64: return "renderscript64"; + case xtensa: return "xtensa"; } llvm_unreachable("Invalid ArchType!"); @@ -144,6 +145,8 @@ StringRef Triple::getArchTypePrefix(ArchType Kind) { case riscv32: case riscv64: return "riscv"; + + case xtensa: return "xtensa"; } } @@ -315,6 +318,7 @@ Triple::ArchType Triple::getArchTypeForLLVMName(StringRef Name) { .Case("wasm64", wasm64) .Case("renderscript32", renderscript32) .Case("renderscript64", renderscript64) + .Case("xtensa", xtensa) .Default(UnknownArch); } @@ -443,6 +447,7 @@ static Triple::ArchType parseArch(StringRef ArchName) { .Case("wasm64", Triple::wasm64) .Case("renderscript32", Triple::renderscript32) .Case("renderscript64", Triple::renderscript64) + .Case("xtensa", Triple::xtensa) .Default(Triple::UnknownArch); // Some architectures require special parsing logic just to compute the @@ -701,6 +706,7 @@ static Triple::ObjectFormatType getDefaultFormat(const Triple &T) { case Triple::tcele: case Triple::thumbeb: case Triple::xcore: + case Triple::xtensa: return Triple::ELF; case Triple::ppc: @@ -1262,6 +1268,7 @@ static unsigned getArchPointerBitWidth(llvm::Triple::ArchType Arch) { case llvm::Triple::shave: case llvm::Triple::wasm32: case llvm::Triple::renderscript32: + case llvm::Triple::xtensa: return 32; case llvm::Triple::aarch64: @@ -1343,6 +1350,7 @@ Triple Triple::get32BitArchVariant() const { case Triple::shave: case Triple::wasm32: case Triple::renderscript32: + case Triple::xtensa: // Already 32-bit. break; @@ -1381,6 +1389,7 @@ Triple Triple::get64BitArchVariant() const { case Triple::xcore: case Triple::sparcel: case Triple::shave: + case Triple::xtensa: T.setArch(UnknownArch); break; @@ -1467,6 +1476,7 @@ Triple Triple::getBigEndianArchVariant() const { // drop any arch suffixes. 
case Triple::arm: case Triple::thumb: + case Triple::xtensa: T.setArch(UnknownArch); break; @@ -1553,6 +1563,7 @@ bool Triple::isLittleEndian() const { case Triple::tcele: case Triple::renderscript32: case Triple::renderscript64: + case Triple::xtensa: return true; default: return false; diff --git a/llvm/lib/Target/LLVMBuild.txt b/llvm/lib/Target/LLVMBuild.txt index d6a95a3c67133..41bdbf58932e9 100644 --- a/llvm/lib/Target/LLVMBuild.txt +++ b/llvm/lib/Target/LLVMBuild.txt @@ -36,6 +36,7 @@ subdirectories = WebAssembly X86 XCore + Xtensa ; This is a special group whose required libraries are extended (by llvm-build) ; with the best execution engine (the native JIT, if available, or the diff --git a/llvm/lib/Target/Xtensa/AsmParser/CMakeLists.txt b/llvm/lib/Target/Xtensa/AsmParser/CMakeLists.txt new file mode 100644 index 0000000000000..dc8612a76d56d --- /dev/null +++ b/llvm/lib/Target/Xtensa/AsmParser/CMakeLists.txt @@ -0,0 +1,7 @@ +include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/.. ) + +add_llvm_library(LLVMXtensaAsmParser + XtensaAsmParser.cpp + ) + +add_dependencies(LLVMXtensaAsmParser XtensaCommonTableGen) diff --git a/llvm/lib/Target/Xtensa/AsmParser/LLVMBuild.txt b/llvm/lib/Target/Xtensa/AsmParser/LLVMBuild.txt new file mode 100644 index 0000000000000..a1ca90e003247 --- /dev/null +++ b/llvm/lib/Target/Xtensa/AsmParser/LLVMBuild.txt @@ -0,0 +1,16 @@ +;===------------------------------------------------------------------------===; +; +; This is an LLVMBuild description file for the components in this subdirectory. +; +; For more information on the LLVMBuild system, please see: +; +; http://llvm.org/docs/LLVMBuild.html +; +;===------------------------------------------------------------------------===; + +[component_0] +type = Library +name = XtensaAsmParser +parent = Xtensa +required_libraries = XtensaDesc XtensaInfo MC MCParser Support +add_to_library_groups = Xtensa diff --git a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp new file mode 100644 index 0000000000000..ae93554bbed26 --- /dev/null +++ b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp @@ -0,0 +1,803 @@ +//===- XtensaAsmParser.cpp - Parse Xtensa assembly to MCInst instructions -===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +#include "MCTargetDesc/XtensaMCExpr.h" +#include "MCTargetDesc/XtensaMCTargetDesc.h" +#include "MCTargetDesc/XtensaTargetStreamer.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/StringSwitch.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCParser/MCAsmLexer.h" +#include "llvm/MC/MCParser/MCParsedAsmOperand.h" +#include "llvm/MC/MCParser/MCTargetAsmParser.h" +#include "llvm/MC/MCRegisterInfo.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/TargetRegistry.h" + +using namespace llvm; + +#define DEBUG_TYPE "xtensav-asm-parser" + +struct XtensaOperand; + +class XtensaAsmParser : public MCTargetAsmParser { + + StringMap LabelVKTable; + + SMLoc getLoc() const { return getParser().getTok().getLoc(); } + + XtensaTargetStreamer &getTargetStreamer() { + MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer(); + return static_cast(TS); + } + + // Override MCTargetAsmParser. + bool ParseDirective(AsmToken DirectiveID) override; + bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override; + bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, + SMLoc NameLoc, OperandVector &Operands) override; + bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, + OperandVector &Operands, MCStreamer &Out, + uint64_t &ErrorInfo, + bool MatchingInlineAsm) override; + unsigned validateTargetOperandClass(MCParsedAsmOperand &Op, + unsigned Kind) override; + + bool processInstruction(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out, + const MCSubtargetInfo *STI); + +// Auto-generated instruction matching functions +#define GET_ASSEMBLER_HEADER +#include "XtensaGenAsmMatcher.inc" + + OperandMatchResultTy parseImmediate(OperandVector &Operands); + OperandMatchResultTy parseRegister(OperandVector &Operands, + bool AllowParens = false, bool SR = false); + OperandMatchResultTy parseOperandWithModifier(OperandVector &Operands); + bool parseOperand(OperandVector &Operands, StringRef Mnemonic, + bool SR = false); + bool ParseInstructionWithSR(ParseInstructionInfo &Info, StringRef Name, + SMLoc NameLoc, OperandVector &Operands); + OperandMatchResultTy parsePCRelTarget(OperandVector &Operands); + +public: + enum XtensaMatchResultTy { + Match_Dummy = FIRST_TARGET_MATCH_RESULT_TY, +#define GET_OPERAND_DIAGNOSTIC_TYPES +#include "XtensaGenAsmMatcher.inc" +#undef GET_OPERAND_DIAGNOSTIC_TYPES + }; + + XtensaAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser, + const MCInstrInfo &MII, const MCTargetOptions &Options) + : MCTargetAsmParser(Options, STI, MII) { + setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); + } +}; + +// Return true if Expr is in the range [MinValue, MaxValue]. 
+static bool inRange(const MCExpr *Expr, int64_t MinValue, int64_t MaxValue) { + if (auto *CE = dyn_cast(Expr)) { + int64_t Value = CE->getValue(); + return Value >= MinValue && Value <= MaxValue; + } + return false; +} + +struct XtensaOperand : public MCParsedAsmOperand { + + enum KindTy { + Token, + Register, + Immediate, + } Kind; + + struct RegOp { + unsigned RegNum; + }; + + struct ImmOp { + const MCExpr *Val; + }; + + SMLoc StartLoc, EndLoc; + union { + StringRef Tok; + RegOp Reg; + ImmOp Imm; + }; + + XtensaOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {} + +public: + XtensaOperand(const XtensaOperand &o) : MCParsedAsmOperand() { + Kind = o.Kind; + StartLoc = o.StartLoc; + EndLoc = o.EndLoc; + switch (Kind) { + case Register: + Reg = o.Reg; + break; + case Immediate: + Imm = o.Imm; + break; + case Token: + Tok = o.Tok; + break; + } + } + + bool isToken() const override { return Kind == Token; } + bool isReg() const override { return Kind == Register; } + bool isImm() const override { return Kind == Immediate; } + bool isMem() const override { return false; } + + bool isImm(int64_t MinValue, int64_t MaxValue) const { + return Kind == Immediate && inRange(getImm(), MinValue, MaxValue); + } + + bool isImm8() const { return isImm(-128, 127); } + + bool isImm8_sh8() const { + return isImm(-32768, 32512) && + ((dyn_cast(getImm())->getValue() & 0xFF) == 0); + } + + bool isImm12() const { return isImm(-2048, 2047); } + + bool isImm12m() const { + return isImm(LONG_MIN, LONG_MAX); /*return isImm(-2048, 2047);*/ + } + + bool isOffset4m32() const { + return isImm(0, 60) && + ((dyn_cast(getImm())->getValue() & 0x3) == 0); + } + + bool isOffset8m8() const { return isImm(0, 255); } + + bool isOffset8m16() const { + return isImm(0, 510) && + ((dyn_cast(getImm())->getValue() & 0x1) == 0); + } + + bool isOffset8m32() const { + return isImm(0, 1020) && + ((dyn_cast(getImm())->getValue() & 0x3) == 0); + } + + bool isentry_imm12() const { return isImm(0, 32760); } + + bool isUimm4() const { return isImm(0, 15); } + + bool isUimm5() const { return isImm(0, 31); } + + bool isImm8n_7() const { return isImm(-8, 7); } + + bool isShimm1_31() const { return isImm(1, 31); } + + bool isImm16_31() const { return isImm(16, 31); } + + bool isImm1_16() const { return isImm(1, 16); } + + bool isImm1n_15() const { return (isImm(1, 15) || isImm(-1, -1)); } + + bool isImm32n_95() const { return isImm(-32, 95); } + + bool isImm64n_4n() const { + return isImm(-64, -4) && + ((dyn_cast(getImm())->getValue() & 0x3) == 0); + } + + bool isB4const() const { + if (Kind != Immediate) + return false; + if (auto *CE = dyn_cast(getImm())) { + int64_t Value = CE->getValue(); + switch (Value) { + case -1: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + case 10: + case 12: + case 16: + case 32: + case 64: + case 128: + case 256: + return true; + default: + return false; + } + } + return false; + } + + bool isB4constu() const { + if (Kind != Immediate) + return false; + if (auto *CE = dyn_cast(getImm())) { + int64_t Value = CE->getValue(); + switch (Value) { + case 32768: + case 65536: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + case 10: + case 12: + case 16: + case 32: + case 64: + case 128: + case 256: + return true; + default: + return false; + } + } + return false; + } + + bool isseimm7_22() const { return isImm(7, 22); } + + /// getStartLoc - Gets location of the first token of this operand + SMLoc getStartLoc() const override { return StartLoc; } + /// getEndLoc - 
Gets location of the last token of this operand + SMLoc getEndLoc() const override { return EndLoc; } + + unsigned getReg() const override { + assert(Kind == Register && "Invalid type access!"); + return Reg.RegNum; + } + + const MCExpr *getImm() const { + assert(Kind == Immediate && "Invalid type access!"); + return Imm.Val; + } + + StringRef getToken() const { + assert(Kind == Token && "Invalid type access!"); + return Tok; + } + + void print(raw_ostream &OS) const override { + switch (Kind) { + case Immediate: + OS << *getImm(); + break; + case Register: + OS << ""; + break; + case Token: + OS << "'" << getToken() << "'"; + break; + } + } + + static std::unique_ptr createToken(StringRef Str, SMLoc S) { + auto Op = std::make_unique(Token); + Op->Tok = Str; + Op->StartLoc = S; + Op->EndLoc = S; + return Op; + } + + static std::unique_ptr createReg(unsigned RegNo, SMLoc S, + SMLoc E) { + auto Op = std::make_unique(Register); + Op->Reg.RegNum = RegNo; + Op->StartLoc = S; + Op->EndLoc = E; + return Op; + } + + static std::unique_ptr createImm(const MCExpr *Val, SMLoc S, + SMLoc E) { + auto Op = std::make_unique(Immediate); + Op->Imm.Val = Val; + Op->StartLoc = S; + Op->EndLoc = E; + return Op; + } + + void addExpr(MCInst &Inst, const MCExpr *Expr) const { + assert(Expr && "Expr shouldn't be null!"); + int64_t Imm = 0; + bool IsConstant = false; + + if (auto *CE = dyn_cast(Expr)) { + IsConstant = true; + Imm = CE->getValue(); + } + + if (IsConstant) + Inst.addOperand(MCOperand::createImm(Imm)); + else + Inst.addOperand(MCOperand::createExpr(Expr)); + } + + // Used by the TableGen Code + void addRegOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + Inst.addOperand(MCOperand::createReg(getReg())); + } + + void addImmOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + addExpr(Inst, getImm()); + } +}; + +#define GET_REGISTER_MATCHER +#define GET_MATCHER_IMPLEMENTATION +#include "XtensaGenAsmMatcher.inc" + +unsigned XtensaAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp, + unsigned Kind) { + return Match_InvalidOperand; +} + +static SMLoc RefineErrorLoc(const SMLoc Loc, const OperandVector &Operands, + uint64_t ErrorInfo) { + if (ErrorInfo != ~0ULL && ErrorInfo < Operands.size()) { + SMLoc ErrorLoc = Operands[ErrorInfo]->getStartLoc(); + if (ErrorLoc == SMLoc()) + return Loc; + return ErrorLoc; + } + return Loc; +} + +bool XtensaAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc, + MCStreamer &Out, + const MCSubtargetInfo *STI) { + Inst.setLoc(IDLoc); + const unsigned Opcode = Inst.getOpcode(); + + switch (Opcode) { + case Xtensa::L32R: { + const MCSymbolRefExpr *OpExpr = + (const MCSymbolRefExpr *)Inst.getOperand(1).getExpr(); + StringRef Name = OpExpr->getSymbol().getName(); + XtensaMCExpr::VariantKind Kind = XtensaMCExpr::VK_Xtensa_None; + StringMap::const_iterator Entry = + LabelVKTable.find(Name); + if (Entry != LabelVKTable.end()) { + Kind = Entry->getValue(); + } + const MCExpr *NewOpExpr = XtensaMCExpr::create(OpExpr, Kind, getContext()); + Inst.getOperand(1).setExpr(NewOpExpr); + } break; + case Xtensa::MOVI: { + if (!Inst.getOperand(1).isExpr()) { + uint64_t ImmOp64 = Inst.getOperand(1).getImm(); + int32_t Imm = ImmOp64; + if ((Imm < -2048) || (Imm > 2047)) { + XtensaTargetStreamer &TS = this->getTargetStreamer(); + MCInst TmpInst; + TmpInst.setLoc(IDLoc); + TmpInst.setOpcode(Xtensa::L32R); + const MCExpr *Value = MCConstantExpr::create(ImmOp64, getContext()); + MCSymbol *Sym = 
getContext().createTempSymbol(); + const MCExpr *Expr = MCSymbolRefExpr::create( + Sym, MCSymbolRefExpr::VK_None, getContext()); + const MCExpr *OpExpr = XtensaMCExpr::create( + Expr, XtensaMCExpr::VK_Xtensa_None, getContext()); + TmpInst.addOperand(Inst.getOperand(0)); + MCOperand Op1 = MCOperand::createExpr(OpExpr); + TmpInst.addOperand(Op1); + TS.emitLiteralLabel(Sym, IDLoc); + TS.emitLiteral(Value, IDLoc); + Inst = TmpInst; + } + } else { + MCInst TmpInst; + TmpInst.setLoc(IDLoc); + TmpInst.setOpcode(Xtensa::L32R); + const MCExpr *Expr = Inst.getOperand(1).getExpr(); + const MCExpr *OpExpr = XtensaMCExpr::create( + Expr, XtensaMCExpr::VK_Xtensa_None, getContext()); + TmpInst.addOperand(Inst.getOperand(0)); + MCOperand Op1 = MCOperand::createExpr(OpExpr); + TmpInst.addOperand(Op1); + Inst = TmpInst; + } + } break; + default: + break; + } + return true; +} + +bool XtensaAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, + OperandVector &Operands, + MCStreamer &Out, + uint64_t &ErrorInfo, + bool MatchingInlineAsm) { + MCInst Inst; + auto Result = + MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm); + + switch (Result) { + default: + break; + case Match_Success: + processInstruction(Inst, IDLoc, Out, STI); + Inst.setLoc(IDLoc); + Out.EmitInstruction(Inst, getSTI()); + return false; + case Match_MissingFeature: + return Error(IDLoc, "instruction use requires an option to be enabled"); + case Match_MnemonicFail: + return Error(IDLoc, "unrecognized instruction mnemonic"); + case Match_InvalidOperand: { + SMLoc ErrorLoc = IDLoc; + if (ErrorInfo != ~0U) { + if (ErrorInfo >= Operands.size()) + return Error(ErrorLoc, "too few operands for instruction"); + + ErrorLoc = ((XtensaOperand &)*Operands[ErrorInfo]).getStartLoc(); + if (ErrorLoc == SMLoc()) + ErrorLoc = IDLoc; + } + return Error(ErrorLoc, "invalid operand for instruction"); + } + case Match_InvalidImm8: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [-128, 127]"); + case Match_InvalidImm8_sh8: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [-32768, 32512], first 8 bits " + "should be zero"); + case Match_InvalidB4const: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected b4const immediate"); + case Match_InvalidB4constu: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected b4constu immediate"); + case Match_InvalidImm12: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [-2048, 2047]"); + case Match_InvalidImm12m: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [-2048, 2047]"); + case Match_InvalidImm1_16: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [1, 16]"); + case Match_InvalidImm1n_15: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [-1, 15] except 0"); + case Match_InvalidImm32n_95: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [-32, 95] except 0"); + case Match_InvalidImm8n_7: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [-8, 7]"); + case Match_InvalidImm64n_4n: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [-64, -4]"); + case Match_InvalidShimm1_31: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [1, 31]"); + case Match_InvalidUimm4: + 
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [0, 15]"); + case Match_InvalidUimm5: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [0, 31]"); + case Match_InvalidOffset8m8: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [0, 255]"); + case Match_InvalidOffset8m16: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [0, 510], first bit " + "should be zero"); + case Match_InvalidOffset8m32: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [0, 1020], first 2 bits " + "should be zero"); + case Match_InvalidOffset4m32: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [0, 60], first 2 bits " + "should be zero"); + case Match_Invalidentry_imm12: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [0, 32760]"); + } + + llvm_unreachable("Unknown match type detected!"); +} + +OperandMatchResultTy +XtensaAsmParser::parsePCRelTarget(OperandVector &Operands) { + MCAsmParser &Parser = getParser(); + LLVM_DEBUG(dbgs() << "parsePCRelTarget\n"); + + SMLoc S = getLexer().getLoc(); + + // Expressions are acceptable + const MCExpr *Expr = nullptr; + if (Parser.parseExpression(Expr)) { + // We have no way of knowing if a symbol was consumed so we must ParseFail + return MatchOperand_ParseFail; + } + + // Currently not support constants + if (Expr->getKind() == MCExpr::ExprKind::Constant) { + Error(getLoc(), "unknown operand"); + return MatchOperand_ParseFail; + } + + Operands.push_back(XtensaOperand::createImm(Expr, S, getLexer().getLoc())); + return MatchOperand_Success; +} + +bool XtensaAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, + SMLoc &EndLoc) { + const AsmToken &Tok = getParser().getTok(); + StartLoc = Tok.getLoc(); + EndLoc = Tok.getEndLoc(); + RegNo = 0; + StringRef Name = getLexer().getTok().getIdentifier(); + + if ((!MatchRegisterName(Name)) && (!MatchRegisterAltName(Name))) { + getParser().Lex(); // Eat identifier token. 
+ return false; + } + + return Error(StartLoc, "invalid register name"); +} + +OperandMatchResultTy XtensaAsmParser::parseRegister(OperandVector &Operands, + bool AllowParens, bool SR) { + SMLoc FirstS = getLoc(); + bool HadParens = false; + AsmToken Buf[2]; + std::string RegName; + + // If this a parenthesised register name is allowed, parse it atomically + if (AllowParens && getLexer().is(AsmToken::LParen)) { + size_t ReadCount = getLexer().peekTokens(Buf); + if (ReadCount == 2 && Buf[1].getKind() == AsmToken::RParen) { + HadParens = true; + getParser().Lex(); // Eat '(' + } + } + + switch (getLexer().getKind()) { + default: + return MatchOperand_NoMatch; + case AsmToken::Integer: + if (!SR) + return MatchOperand_NoMatch; + RegName = std::to_string(getLexer().getTok().getIntVal()); + break; + case AsmToken::Identifier: + RegName = getLexer().getTok().getIdentifier(); + break; + } + + unsigned RegNo = MatchRegisterName(RegName); + if (RegNo == 0) + RegNo = MatchRegisterAltName(RegName); + + if (RegNo == 0) { + if (HadParens) + getLexer().UnLex(Buf[0]); + return MatchOperand_NoMatch; + } + if (HadParens) + Operands.push_back(XtensaOperand::createToken("(", FirstS)); + SMLoc S = getLoc(); + SMLoc E = SMLoc::getFromPointer(S.getPointer() - 1); + getLexer().Lex(); + Operands.push_back(XtensaOperand::createReg(RegNo, S, E)); + + if (HadParens) { + getParser().Lex(); // Eat ')' + Operands.push_back(XtensaOperand::createToken(")", getLoc())); + } + + return MatchOperand_Success; +} + +OperandMatchResultTy XtensaAsmParser::parseImmediate(OperandVector &Operands) { + SMLoc S = getLoc(); + SMLoc E = SMLoc::getFromPointer(S.getPointer() - 1); + const MCExpr *Res; + + switch (getLexer().getKind()) { + default: + return MatchOperand_NoMatch; + case AsmToken::LParen: + case AsmToken::Minus: + case AsmToken::Plus: + case AsmToken::Tilde: + case AsmToken::Integer: + case AsmToken::String: + if (getParser().parseExpression(Res)) + return MatchOperand_ParseFail; + break; + case AsmToken::Identifier: { + StringRef Identifier; + if (getParser().parseIdentifier(Identifier)) + return MatchOperand_ParseFail; + + MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier); + Res = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext()); + break; + } + case AsmToken::Percent: + return parseOperandWithModifier(Operands); + } + + Operands.push_back(XtensaOperand::createImm(Res, S, E)); + return MatchOperand_Success; +} + +OperandMatchResultTy +XtensaAsmParser::parseOperandWithModifier(OperandVector &Operands) { + return MatchOperand_ParseFail; +} + +/// Looks at a token type and creates the relevant operand +/// from this information, adding to Operands. +/// If operand was parsed, returns false, else true. +bool XtensaAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic, + bool SR) { + // Check if the current operand has a custom associated parser, if so, try to + // custom parse the operand, or fallback to the general approach. + OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic); + if (ResTy == MatchOperand_Success) + return false; + + // If there wasn't a custom match, try the generic matcher below. Otherwise, + // there was a match, but an error occurred, in which case, just return that + // the operand parsing failed. 
+ if (ResTy == MatchOperand_ParseFail) + return true; + + // Attempt to parse token as register + if (parseRegister(Operands, true, SR) == MatchOperand_Success) + return false; + + // Attempt to parse token as an immediate + if (parseImmediate(Operands) == MatchOperand_Success) { + return false; + } + + // Finally we have exhausted all options and must declare defeat. + Error(getLoc(), "unknown operand"); + return true; +} + +bool XtensaAsmParser::ParseInstructionWithSR(ParseInstructionInfo &Info, + StringRef Name, SMLoc NameLoc, + OperandVector &Operands) { + if ((Name.startswith("wsr.") || Name.startswith("rsr.") || + Name.startswith("xsr.") || Name.startswith("rur.") || + Name.startswith("wur.")) && + (Name.size() > 4)) { + // Parse case when instruction name is concatenated with SR register + // name, like "wsr.sar a1" + + // First operand is token for instruction + Operands.push_back(XtensaOperand::createToken(Name.take_front(3), NameLoc)); + + StringRef RegName = Name.drop_front(4); + unsigned RegNo = MatchRegisterName(RegName); + + if (RegNo == 0) + RegNo = MatchRegisterAltName(RegName); + + if (RegNo == 0) { + Error(NameLoc, "invalid register name"); + return true; + } + + // Parse operand + if (parseOperand(Operands, Name)) + return true; + + SMLoc S = getLoc(); + SMLoc E = SMLoc::getFromPointer(S.getPointer() - 1); + Operands.push_back(XtensaOperand::createReg(RegNo, S, E)); + } else { + // First operand is token for instruction + Operands.push_back(XtensaOperand::createToken(Name, NameLoc)); + + // Parse first operand + if (parseOperand(Operands, Name)) + return true; + + if (!getLexer().is(AsmToken::Comma)) { + SMLoc Loc = getLexer().getLoc(); + getParser().eatToEndOfStatement(); + return Error(Loc, "unexpected token"); + } + + getLexer().Lex(); + + // Parse second operand + if (parseOperand(Operands, Name, true)) + return true; + } + + if (getLexer().isNot(AsmToken::EndOfStatement)) { + SMLoc Loc = getLexer().getLoc(); + getParser().eatToEndOfStatement(); + return Error(Loc, "unexpected token"); + } + + getParser().Lex(); // Consume the EndOfStatement. + return false; +} + +bool XtensaAsmParser::ParseInstruction(ParseInstructionInfo &Info, + StringRef Name, SMLoc NameLoc, + OperandVector &Operands) { + if (Name.startswith("wsr") || Name.startswith("rsr") || + Name.startswith("xsr") || Name.startswith("rur.") || + Name.startswith("wur.")) { + return ParseInstructionWithSR(Info, Name, NameLoc, Operands); + } + + // First operand is token for instruction + Operands.push_back(XtensaOperand::createToken(Name, NameLoc)); + + // If there are no more operands, then finish + if (getLexer().is(AsmToken::EndOfStatement)) + return false; + + // Parse first operand + if (parseOperand(Operands, Name)) + return true; + + // Parse until end of statement, consuming commas between operands + while (getLexer().is(AsmToken::Comma)) { + // Consume comma token + getLexer().Lex(); + + // Parse next operand + if (parseOperand(Operands, Name)) + return true; + } + + if (getLexer().isNot(AsmToken::EndOfStatement)) { + SMLoc Loc = getLexer().getLoc(); + getParser().eatToEndOfStatement(); + return Error(Loc, "unexpected token"); + } + + getParser().Lex(); // Consume the EndOfStatement. + return false; +} + +bool XtensaAsmParser::ParseDirective(AsmToken DirectiveID) { return true; } + +// Force static initialization. 
+extern "C" void LLVMInitializeXtensaAsmParser() { + RegisterMCAsmParser X(TheXtensaTarget); +} diff --git a/llvm/lib/Target/Xtensa/CMakeLists.txt b/llvm/lib/Target/Xtensa/CMakeLists.txt new file mode 100644 index 0000000000000..6dd4b904d4d8d --- /dev/null +++ b/llvm/lib/Target/Xtensa/CMakeLists.txt @@ -0,0 +1,35 @@ +set(LLVM_TARGET_DEFINITIONS Xtensa.td) + +tablegen(LLVM XtensaGenAsmMatcher.inc -gen-asm-matcher) +tablegen(LLVM XtensaGenAsmWriter.inc -gen-asm-writer) +tablegen(LLVM XtensaGenCallingConv.inc -gen-callingconv) +tablegen(LLVM XtensaGenDAGISel.inc -gen-dag-isel) +tablegen(LLVM XtensaGenDisassemblerTables.inc -gen-disassembler) +tablegen(LLVM XtensaGenInstrInfo.inc -gen-instr-info) +tablegen(LLVM XtensaGenMCCodeEmitter.inc -gen-emitter) +tablegen(LLVM XtensaGenRegisterInfo.inc -gen-register-info) +tablegen(LLVM XtensaGenSubtargetInfo.inc -gen-subtarget) + +add_public_tablegen_target(XtensaCommonTableGen) + +add_llvm_target(XtensaCodeGen + XtensaAsmPrinter.cpp + XtensaConstantPoolValue.cpp + XtensaFrameLowering.cpp + XtensaInstrInfo.cpp + XtensaISelDAGToDAG.cpp + XtensaISelLowering.cpp + XtensaMachineFunctionInfo.cpp + XtensaMCInstLower.cpp + XtensaRegisterInfo.cpp + XtensaSizeReductionPass.cpp + XtensaSubtarget.cpp + XtensaTargetMachine.cpp + XtensaTargetObjectFile.cpp + ) + +add_subdirectory(AsmParser) +add_subdirectory(Disassembler) +add_subdirectory(MCTargetDesc) +add_subdirectory(TargetInfo) + diff --git a/llvm/lib/Target/Xtensa/Disassembler/CMakeLists.txt b/llvm/lib/Target/Xtensa/Disassembler/CMakeLists.txt new file mode 100644 index 0000000000000..54590f7daf164 --- /dev/null +++ b/llvm/lib/Target/Xtensa/Disassembler/CMakeLists.txt @@ -0,0 +1,3 @@ +add_llvm_library(LLVMXtensaDisassembler + XtensaDisassembler.cpp + ) diff --git a/llvm/lib/Target/Xtensa/Disassembler/LLVMBuild.txt b/llvm/lib/Target/Xtensa/Disassembler/LLVMBuild.txt new file mode 100644 index 0000000000000..abfc6d7fabec8 --- /dev/null +++ b/llvm/lib/Target/Xtensa/Disassembler/LLVMBuild.txt @@ -0,0 +1,23 @@ +;===-- ./lib/Target/Xtensa/Disassembler/LLVMBuild.txt ---------*- Conf -*--===; +; +; The LLVM Compiler Infrastructure +; +; This file is distributed under the University of Illinois Open Source +; License. See LICENSE.TXT for details. +; +;===------------------------------------------------------------------------===; +; +; This is an LLVMBuild description file for the components in this subdirectory. +; +; For more information on the LLVMBuild system, please see: +; +; http://llvm.org/docs/LLVMBuild.html +; +;===------------------------------------------------------------------------===; + +[component_0] +type = Library +name = XtensaDisassembler +parent = Xtensa +required_libraries = MC MCDisassembler Support XtensaDesc XtensaInfo +add_to_library_groups = Xtensa diff --git a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp new file mode 100644 index 0000000000000..6b84acec698cf --- /dev/null +++ b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp @@ -0,0 +1,423 @@ +//===-- XtensaDisassembler.cpp - Disassembler for Xtensa ------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the XtensaDisassembler class. 
+// +//===----------------------------------------------------------------------===// + +#include "MCTargetDesc/XtensaMCTargetDesc.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCDisassembler/MCDisassembler.h" +#include "llvm/MC/MCFixedLenDisassembler.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCRegisterInfo.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/Support/Endian.h" +#include "llvm/Support/TargetRegistry.h" + +using namespace llvm; + +#define DEBUG_TYPE "Xtensa-disassembler" + +typedef MCDisassembler::DecodeStatus DecodeStatus; + +namespace { + +class XtensaDisassembler : public MCDisassembler { + bool IsLittleEndian; + +public: + XtensaDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx, bool isLE) + : MCDisassembler(STI, Ctx), IsLittleEndian(isLE) {} + + bool hasDensity() const { + return STI.getFeatureBits()[Xtensa::FeatureDensity]; + } + + DecodeStatus getInstruction(MCInst &Instr, uint64_t &Size, + ArrayRef<uint8_t> Bytes, uint64_t Address, + raw_ostream &VStream, + raw_ostream &CStream) const override; +}; +} // end anonymous namespace + +static MCDisassembler *createXtensaDisassembler(const Target &T, + const MCSubtargetInfo &STI, + MCContext &Ctx) { + return new XtensaDisassembler(STI, Ctx, true); +} + +extern "C" void LLVMInitializeXtensaDisassembler() { + TargetRegistry::RegisterMCDisassembler(TheXtensaTarget, + createXtensaDisassembler); +} + +static const unsigned ARDecoderTable[] = { + Xtensa::A0, Xtensa::SP, Xtensa::A2, Xtensa::A3, Xtensa::A4, Xtensa::A5, + Xtensa::A6, Xtensa::A7, Xtensa::A8, Xtensa::A9, Xtensa::A10, Xtensa::A11, + Xtensa::A12, Xtensa::A13, Xtensa::A14, Xtensa::A15}; + +static DecodeStatus DecodeARRegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo >= sizeof(ARDecoderTable) / sizeof(ARDecoderTable[0])) + return MCDisassembler::Fail; + + unsigned Reg = ARDecoderTable[RegNo]; + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + +static const unsigned FPRDecoderTable[] = { + Xtensa::F0, Xtensa::F1, Xtensa::F2, Xtensa::F3, Xtensa::F4, Xtensa::F5, + Xtensa::F6, Xtensa::F7, Xtensa::F8, Xtensa::F9, Xtensa::F10, Xtensa::F11, + Xtensa::F12, Xtensa::F13, Xtensa::F14, Xtensa::F15}; + +static DecodeStatus DecodeFPRRegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo >= sizeof(FPRDecoderTable) / sizeof(FPRDecoderTable[0])) + return MCDisassembler::Fail; + + unsigned Reg = FPRDecoderTable[RegNo]; + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + +static const unsigned BRDecoderTable[] = { + Xtensa::B0, Xtensa::B1, Xtensa::B2, Xtensa::B3, Xtensa::B4, Xtensa::B5, + Xtensa::B6, Xtensa::B7, Xtensa::B8, Xtensa::B9, Xtensa::B10, Xtensa::B11, + Xtensa::B12, Xtensa::B13, Xtensa::B14, Xtensa::B15}; + +static DecodeStatus DecodeBRRegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo >= sizeof(BRDecoderTable) / sizeof(BRDecoderTable[0])) + return MCDisassembler::Fail; + + unsigned Reg = BRDecoderTable[RegNo]; + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + +static const unsigned SRDecoderTable[] = { + Xtensa::SAR, 3, Xtensa::SCOMPARE1, 12, Xtensa::IBREAKENABLE, 96, + Xtensa::MEMCTL, 97, Xtensa::IBREAKA0, 128, Xtensa::IBREAKA1, 129, + Xtensa::DBREAKA0, 144, Xtensa::DBREAKA1, 145, Xtensa::DBREAKC0, 160, + Xtensa::DBREAKC1, 161, Xtensa::CONFIGID0, 176, Xtensa::CONFIGID1, 208, + Xtensa::EPC1, 177, Xtensa::EPC2, 178, Xtensa::EPC3, 179, + Xtensa::EPC4, 180, Xtensa::EPC5, 181, Xtensa::EPC6, 182, +
Xtensa::EPC7, 183, Xtensa::INTSET, 226, Xtensa::INTENABLE, 228, + Xtensa::PS, 230, Xtensa::VECBASE, 231, Xtensa::DEBUGCAUSE, 233, + Xtensa::CCOUNT, 234, Xtensa::PRID, 235, Xtensa::CCOMPARE0, 240, + Xtensa::CCOMPARE1, 241, Xtensa::CCOMPARE2, 242}; + +static DecodeStatus DecodeSRRegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo > 255) + return MCDisassembler::Fail; + + for (unsigned i = 0; i < sizeof(SRDecoderTable) / sizeof(SRDecoderTable[0]); i += 2) { + if (SRDecoderTable[i + 1] == RegNo) { + unsigned Reg = SRDecoderTable[i]; + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; + } + } + + return MCDisassembler::Fail; +} + +static const unsigned URDecoderTable[] = {Xtensa::THREADPTR, 231}; + +static DecodeStatus DecodeURRegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo > 255) + return MCDisassembler::Fail; + + for (unsigned i = 0; i < sizeof(URDecoderTable) / sizeof(URDecoderTable[0]); i += 2) { + if (URDecoderTable[i + 1] == RegNo) { + unsigned Reg = URDecoderTable[i]; + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; + } + } + + return MCDisassembler::Fail; +} + +static bool tryAddingSymbolicOperand(int64_t Value, bool isBranch, + uint64_t Address, uint64_t Offset, + uint64_t Width, MCInst &MI, + const void *Decoder) { + const MCDisassembler *Dis = static_cast<const MCDisassembler *>(Decoder); + return Dis->tryAddingSymbolicOperand(MI, Value, Address, isBranch, Offset, + Width); +} + +static DecodeStatus decodeCallOperand(MCInst &Inst, uint64_t Imm, + int64_t Address, const void *Decoder) { + assert(isUInt<18>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(SignExtend64<20>(Imm << 2))); + return MCDisassembler::Success; +} + +static DecodeStatus decodeJumpOperand(MCInst &Inst, uint64_t Imm, + int64_t Address, const void *Decoder) { + assert(isUInt<18>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(SignExtend64<18>(Imm))); + return MCDisassembler::Success; +} + +static DecodeStatus decodeBranchOperand(MCInst &Inst, uint64_t Imm, + int64_t Address, const void *Decoder) { + switch (Inst.getOpcode()) { + case Xtensa::BEQZ: + case Xtensa::BGEZ: + case Xtensa::BLTZ: + case Xtensa::BNEZ: + assert(isUInt<12>(Imm) && "Invalid immediate"); + if (!tryAddingSymbolicOperand(SignExtend64<12>(Imm) + 4 + Address, true, + Address, 0, 3, Inst, Decoder)) + Inst.addOperand(MCOperand::createImm(SignExtend64<12>(Imm))); + break; + default: + assert(isUInt<8>(Imm) && "Invalid immediate"); + if (!tryAddingSymbolicOperand(SignExtend64<8>(Imm) + 4 + Address, true, + Address, 0, 3, Inst, Decoder)) + Inst.addOperand(MCOperand::createImm(SignExtend64<8>(Imm))); + } + return MCDisassembler::Success; +} + +static DecodeStatus decodeL32ROperand(MCInst &Inst, uint64_t Imm, + int64_t Address, const void *Decoder) { + + assert(isUInt<16>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm( + SignExtend64<17>((Imm << 2) + 0x40000 + (Address & 0x3)))); + return MCDisassembler::Success; +} + +static DecodeStatus decodeImm8Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, const void *Decoder) { + assert(isUInt<8>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(SignExtend64<8>(Imm))); + return MCDisassembler::Success; +} + +static DecodeStatus decodeImm8_sh8Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<8>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(SignExtend64<16>(Imm << 8))); + return
MCDisassembler::Success; +} + +static DecodeStatus decodeImm12Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, const void *Decoder) { + assert(isUInt<12>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(SignExtend64<12>(Imm))); + return MCDisassembler::Success; +} + +static DecodeStatus decodeUimm4Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, const void *Decoder) { + assert(isUInt<4>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeUimm5Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, const void *Decoder) { + assert(isUInt<5>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeImm1_16Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, const void *Decoder) { + assert(isUInt<4>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm + 1)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeImm1n_15Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<4>(Imm) && "Invalid immediate"); + if (!Imm) + Inst.addOperand(MCOperand::createImm(-1)); + else + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeImm32n_95Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<7>(Imm) && "Invalid immediate"); + if ((Imm & 0x60) == 0x60) + Inst.addOperand(MCOperand::createImm((~0x1f) | Imm)); + else + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeImm8n_7Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, const void *Decoder) { + assert(isUInt<4>(Imm) && "Invalid immediate"); + if (Imm > 7) + Inst.addOperand(MCOperand::createImm(Imm - 16)); + else + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeImm64n_4nOperand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<4>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm((~0x3f) | (Imm << 2))); + return MCDisassembler::Success; +} + +static DecodeStatus decodeShimm1_31Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<4>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(32 - Imm)); + return MCDisassembler::Success; +} + +static int64_t TableB4const[16] = {-1, 1, 2, 3, 4, 5, 6, 7, + 8, 10, 12, 16, 32, 64, 128, 256}; +static DecodeStatus decodeB4constOperand(MCInst &Inst, uint64_t Imm, + int64_t Address, const void *Decoder) { + assert(isUInt<4>(Imm) && "Invalid immediate"); + + Inst.addOperand(MCOperand::createImm(TableB4const[Imm])); + return MCDisassembler::Success; +} + +static int64_t TableB4constu[16] = {32768, 65536, 2, 3, 4, 5, 6, 7, + 8, 10, 12, 16, 32, 64, 128, 256}; +static DecodeStatus decodeB4constuOperand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<4>(Imm) && "Invalid immediate"); + + Inst.addOperand(MCOperand::createImm(TableB4constu[Imm])); + return MCDisassembler::Success; +} + +static DecodeStatus decodeMem8Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, const void *Decoder) { + assert(isUInt<12>(Imm) && "Invalid immediate"); + DecodeARRegisterClass(Inst, Imm & 0xf, Address, Decoder); + Inst.addOperand(MCOperand::createImm((Imm >> 4) & 0xff)); + return 
MCDisassembler::Success; +} + +static DecodeStatus decodeMem16Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, const void *Decoder) { + assert(isUInt<12>(Imm) && "Invalid immediate"); + DecodeARRegisterClass(Inst, Imm & 0xf, Address, Decoder); + Inst.addOperand(MCOperand::createImm((Imm >> 3) & 0x1fe)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeMem32Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, const void *Decoder) { + assert(isUInt<12>(Imm) && "Invalid immediate"); + DecodeARRegisterClass(Inst, Imm & 0xf, Address, Decoder); + Inst.addOperand(MCOperand::createImm((Imm >> 2) & 0x3fc)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeMem32nOperand(MCInst &Inst, uint64_t Imm, + int64_t Address, const void *Decoder) { + assert(isUInt<8>(Imm) && "Invalid immediate"); + DecodeARRegisterClass(Inst, Imm & 0xf, Address, Decoder); + Inst.addOperand(MCOperand::createImm((Imm >> 2) & 0x3c)); + return MCDisassembler::Success; +} + +/// Read two bytes from the ArrayRef and return 16 bit data sorted +/// according to the given endianness. +static DecodeStatus readInstruction16(ArrayRef<uint8_t> Bytes, uint64_t Address, + uint64_t &Size, uint32_t &Insn, + bool IsLittleEndian) { + // We want to read exactly 2 Bytes of data. + if (Bytes.size() < 2) { + Size = 0; + return MCDisassembler::Fail; + } + + if (!IsLittleEndian) { + llvm_unreachable("Big-endian mode currently is not supported!"); + } else { + Insn = (Bytes[1] << 8) | Bytes[0]; + } + + return MCDisassembler::Success; +} + +/// Read three bytes from the ArrayRef and return 24 bit data sorted +/// according to the given endianness. +static DecodeStatus readInstruction24(ArrayRef<uint8_t> Bytes, uint64_t Address, + uint64_t &Size, uint32_t &Insn, + bool IsLittleEndian) { + // We want to read exactly 3 Bytes of data.
+ if (Bytes.size() < 3) { + Size = 0; + return MCDisassembler::Fail; + } + + if (!IsLittleEndian) { + llvm_unreachable("Big-endian mode currently is not supported!"); + } else { + Insn = (Bytes[2] << 16) | (Bytes[1] << 8) | (Bytes[0] << 0); + } + + return MCDisassembler::Success; +} + +#include "XtensaGenDisassemblerTables.inc" + +DecodeStatus XtensaDisassembler::getInstruction(MCInst &MI, uint64_t &Size, + ArrayRef Bytes, + uint64_t Address, + raw_ostream &OS, + raw_ostream &CS) const { + uint32_t Insn; + DecodeStatus Result; + + if (hasDensity()) { + Result = readInstruction16(Bytes, Address, Size, Insn, IsLittleEndian); + if (Result == MCDisassembler::Fail) + return MCDisassembler::Fail; + LLVM_DEBUG(dbgs() << "Trying Xtensa 16-bit instruction table :\n"); + Result = decodeInstruction(DecoderTable16, MI, Insn, Address, this, STI); + if (Result != MCDisassembler::Fail) { + Size = 2; + return Result; + } + } + + Result = readInstruction24(Bytes, Address, Size, Insn, IsLittleEndian); + if (Result == MCDisassembler::Fail) + return MCDisassembler::Fail; + LLVM_DEBUG(dbgs() << "Trying Xtensa 24-bit instruction table :\n"); + Result = decodeInstruction(DecoderTable24, MI, Insn, Address, this, STI); + Size = 3; + return Result; +} diff --git a/llvm/lib/Target/Xtensa/LLVMBuild.txt b/llvm/lib/Target/Xtensa/LLVMBuild.txt new file mode 100644 index 0000000000000..36d37e9b16613 --- /dev/null +++ b/llvm/lib/Target/Xtensa/LLVMBuild.txt @@ -0,0 +1,17 @@ +[common] +subdirectories = AsmParser Disassembler TargetInfo MCTargetDesc + +[component_0] +type = TargetGroup +name = Xtensa +parent = Target +has_asmparser = 1 +has_asmprinter = 1 +has_disassembler = 1 + +[component_1] +type = Library +name = XtensaCodeGen +parent = Xtensa +required_libraries = AsmPrinter CodeGen Core MC SelectionDAG XtensaDesc XtensaInfo Support Target +add_to_library_groups = Xtensa diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/CMakeLists.txt b/llvm/lib/Target/Xtensa/MCTargetDesc/CMakeLists.txt new file mode 100644 index 0000000000000..a2f07fde00a18 --- /dev/null +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/CMakeLists.txt @@ -0,0 +1,10 @@ +add_llvm_library(LLVMXtensaDesc + XtensaAsmBackend.cpp + XtensaELFObjectWriter.cpp + XtensaInstPrinter.cpp + XtensaMCAsmInfo.cpp + XtensaMCCodeEmitter.cpp + XtensaMCExpr.cpp + XtensaMCTargetDesc.cpp + XtensaTargetStreamer.cpp + ) diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/LLVMBuild.txt b/llvm/lib/Target/Xtensa/MCTargetDesc/LLVMBuild.txt new file mode 100644 index 0000000000000..059385de9bb32 --- /dev/null +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/LLVMBuild.txt @@ -0,0 +1,16 @@ +;===------------------------------------------------------------------------===; +; +; This is an LLVMBuild description file for the components in this subdirectory. 
+; +; For more information on the LLVMBuild system, please see: +; +; http://llvm.org/docs/LLVMBuild.html +; +;===------------------------------------------------------------------------===; + +[component_0] +type = Library +name = XtensaDesc +parent = Xtensa +required_libraries = MC XtensaInfo Support +add_to_library_groups = Xtensa diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaAsmBackend.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaAsmBackend.cpp new file mode 100644 index 0000000000000..4e51165a57041 --- /dev/null +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaAsmBackend.cpp @@ -0,0 +1,220 @@ +//===-- XtensaMCAsmBackend.cpp - Xtensa assembler backend ---------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===--------------------------------------------------------------------===// + +#include "MCTargetDesc/XtensaFixupKinds.h" +#include "MCTargetDesc/XtensaMCTargetDesc.h" +#include "llvm/MC/MCAsmBackend.h" +#include "llvm/MC/MCAssembler.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCELFObjectWriter.h" +#include "llvm/MC/MCFixupKindInfo.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCObjectWriter.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/Support/raw_ostream.h" + +using namespace llvm; + +namespace llvm { +class MCObjectTargetWriter; +class XtensaMCAsmBackend : public MCAsmBackend { + uint8_t OSABI; + bool IsLittleEndian; + +public: + XtensaMCAsmBackend(uint8_t osABI, bool isLE) + : MCAsmBackend(support::little), OSABI(osABI), IsLittleEndian(isLE) {} + + unsigned getNumFixupKinds() const override { + return Xtensa::NumTargetFixupKinds; + } + const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override; + void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup, + const MCValue &Target, MutableArrayRef Data, + uint64_t Value, bool IsResolved, + const MCSubtargetInfo *STI) const override; + bool mayNeedRelaxation(const MCInst &Inst, + const MCSubtargetInfo &STI) const override; + bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value, + const MCRelaxableFragment *Fragment, + const MCAsmLayout &Layout) const override; + void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI, + MCInst &Res) const override; + bool writeNopData(raw_ostream &OS, uint64_t Count) const override; + + std::unique_ptr createObjectTargetWriter() const { + return createXtensaObjectWriter(OSABI, IsLittleEndian); + } +}; +} // namespace llvm + +const MCFixupKindInfo & +XtensaMCAsmBackend::getFixupKindInfo(MCFixupKind Kind) const { + const static MCFixupKindInfo Infos[Xtensa::NumTargetFixupKinds] = { + // name offset bits flags + {"fixup_xtensa_branch_6", 0, 16, MCFixupKindInfo::FKF_IsPCRel}, + {"fixup_xtensa_branch_8", 16, 8, MCFixupKindInfo::FKF_IsPCRel}, + {"fixup_xtensa_branch_12", 12, 12, MCFixupKindInfo::FKF_IsPCRel}, + {"fixup_xtensa_jump_18", 6, 18, MCFixupKindInfo::FKF_IsPCRel}, + {"fixup_xtensa_call_18", 6, 18, + MCFixupKindInfo::FKF_IsPCRel | + MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, + {"fixup_xtensa_l32r_16", 8, 16, + MCFixupKindInfo::FKF_IsPCRel | + MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}}; + + if (Kind < FirstTargetFixupKind) + return MCAsmBackend::getFixupKindInfo(Kind); + assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() && + "Invalid kind!"); + return Infos[Kind - FirstTargetFixupKind]; +} + +static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value, + 
MCContext &Ctx) { + unsigned Kind = Fixup.getKind(); + switch (Kind) { + default: + llvm_unreachable("Unknown fixup kind!"); + case FK_Data_1: + case FK_Data_2: + case FK_Data_4: + case FK_Data_8: + return Value; + case Xtensa::fixup_xtensa_branch_6: { + Value -= 4; + if (!isInt<6>(Value)) + Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); + unsigned Hi2 = (Value >> 4) & 0x3; + unsigned Lo4 = (Value)&0xf; + return (Hi2 << 4) | (Lo4 << 12); + } + case Xtensa::fixup_xtensa_branch_8: + Value -= 4; + if (!isInt<8>(Value)) + Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); + return (Value & 0xff); + case Xtensa::fixup_xtensa_branch_12: + Value -= 4; + if (!isInt<12>(Value)) + Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); + return (Value & 0xfff); + case Xtensa::fixup_xtensa_jump_18: + Value -= 4; + if (!isInt<18>(Value)) + Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); + return (Value & 0x3ffff); + case Xtensa::fixup_xtensa_call_18: + Value -= 4; + if (!isInt<20>(Value)) + Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); + if (Value & 0x3) + Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned"); + return (Value & 0xffffc) >> 2; + case Xtensa::fixup_xtensa_l32r_16: + unsigned Offset = Fixup.getOffset(); + if (Offset & 0x3) + Value -= 4; + if (!isInt<18>(Value) && (Value & 0x20000)) + Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); + if (Value & 0x3) + Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned"); + return (Value & 0x3fffc) >> 2; + } +} + +static unsigned getSize(unsigned Kind) { + switch (Kind) { + default: + return 3; + case MCFixupKind::FK_Data_4: + return 4; + case Xtensa::fixup_xtensa_branch_6: + return 2; + } +} + +void XtensaMCAsmBackend::applyFixup(const MCAssembler &Asm, + const MCFixup &Fixup, const MCValue &Target, + MutableArrayRef Data, uint64_t Value, + bool IsResolved, + const MCSubtargetInfo *STI) const { + MCContext &Ctx = Asm.getContext(); + MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind()); + + Value = adjustFixupValue(Fixup, Value, Ctx); + + // Shift the value into position. + Value <<= Info.TargetOffset; + + if (!Value) + return; // Doesn't change encoding. 
+ + unsigned Offset = Fixup.getOffset(); + unsigned FullSize = getSize(Fixup.getKind()); + + for (unsigned i = 0; i != FullSize; ++i) { + Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff); + } +} + +bool XtensaMCAsmBackend::mayNeedRelaxation(const MCInst &Inst, + const MCSubtargetInfo &STI) const { + return false; +} + +bool XtensaMCAsmBackend::fixupNeedsRelaxation( + const MCFixup &Fixup, uint64_t Value, const MCRelaxableFragment *Fragment, + const MCAsmLayout &Layout) const { + return false; +} + +void XtensaMCAsmBackend::relaxInstruction(const MCInst &Inst, + const MCSubtargetInfo &STI, + MCInst &Res) const {} + +bool XtensaMCAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const { + uint64_t NumNops24b = Count / 3; + + for (uint64_t i = 0; i != NumNops24b; ++i) { + // Currently only little-endian machines are supported; + // big-endian support may be added in the future. + if (IsLittleEndian) { + OS.write("\xf0", 1); + OS.write("\x20", 1); + OS.write("\x00", 1); + } else { + llvm_unreachable("Big-endian mode currently is not supported!"); + } + Count -= 3; + } + + // TODO: maybe this function should return an error if (Count > 0) + switch (Count) { + default: + break; + case 1: + OS.write("\0", 1); + break; + case 2: + OS.write("\0\0", 2); + break; + } + + return true; +} + +MCAsmBackend *llvm::createXtensaMCAsmBackend(const Target &T, + const MCSubtargetInfo &STI, + const MCRegisterInfo &MRI, + const MCTargetOptions &Options) { + uint8_t OSABI = + MCELFObjectTargetWriter::getOSABI(STI.getTargetTriple().getOS()); + return new llvm::XtensaMCAsmBackend(OSABI, true); +} diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaBaseInfo.h b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaBaseInfo.h new file mode 100644 index 0000000000000..8e4d284ddf84f --- /dev/null +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaBaseInfo.h @@ -0,0 +1,41 @@ +//===-- XtensaBaseInfo.h - Top level definitions for Xtensa MC ---*- C++ -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===---------------------------------------------------------------------===// +// +// This file contains small standalone helper functions and enum definitions +// for the Xtensa target useful for the compiler back-end and the MC libraries. +// +//===---------------------------------------------------------------------===// +#ifndef LLVM_LIB_TARGET_XTENSA_MCTARGETDESC_XTENSABASEINFO_H +#define LLVM_LIB_TARGET_XTENSA_MCTARGETDESC_XTENSABASEINFO_H + +#include "XtensaFixupKinds.h" +#include "XtensaMCTargetDesc.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/Support/DataTypes.h" +#include "llvm/Support/ErrorHandling.h" + +namespace llvm { + +/// XtensaII - This namespace holds all of the target specific flags that +/// instruction info tracks. +/// +namespace XtensaII { +/// Target Operand Flag enum. +enum TOF { + //===------------------------------------------------------------------===// + // Xtensa Specific MachineOperand flags.
+ MO_NO_FLAG, + MO_TPOFF, // Represents the offset from the thread pointer + MO_PLT +}; + +} // namespace XtensaII +} // namespace llvm + +#endif diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaELFObjectWriter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaELFObjectWriter.cpp new file mode 100644 index 0000000000000..9821a1ae9c4e3 --- /dev/null +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaELFObjectWriter.cpp @@ -0,0 +1,69 @@ +//===-- XtensaMCObjectWriter.cpp - Xtensa ELF writer ----------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "MCTargetDesc/XtensaMCTargetDesc.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/BinaryFormat/ELF.h" +#include "llvm/MC/MCELFObjectWriter.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCFixup.h" +#include "llvm/MC/MCObjectWriter.h" +#include "llvm/MC/MCValue.h" +#include "llvm/Support/ErrorHandling.h" +#include +#include + +using namespace llvm; + +namespace { +class XtensaObjectWriter : public MCELFObjectTargetWriter { +public: + XtensaObjectWriter(uint8_t OSABI); + + virtual ~XtensaObjectWriter(); + +protected: + unsigned getRelocType(MCContext &Ctx, const MCValue &Target, + const MCFixup &Fixup, bool IsPCRel) const override; + bool needsRelocateWithSymbol(const MCSymbol &Sym, + unsigned Type) const override; +}; +} // namespace + +XtensaObjectWriter::XtensaObjectWriter(uint8_t OSABI) + : MCELFObjectTargetWriter(false, OSABI, ELF::EM_XTENSA, + /*HasRelocationAddend=*/true) {} + +XtensaObjectWriter::~XtensaObjectWriter() {} + +unsigned XtensaObjectWriter::getRelocType(MCContext &Ctx, const MCValue &Target, + const MCFixup &Fixup, + bool IsPCRel) const { + MCSymbolRefExpr::VariantKind Modifier = Target.getAccessVariant(); + + switch ((unsigned)Fixup.getKind()) { + case FK_Data_4: + if (Modifier == MCSymbolRefExpr::VariantKind::VK_TPOFF) + return ELF::R_XTENSA_TLS_TPOFF; + else + return ELF::R_XTENSA_32; + default: + return ELF::R_XTENSA_SLOT0_OP; + } +} + +std::unique_ptr +llvm::createXtensaObjectWriter(uint8_t OSABI, bool IsLittleEndian) { + return std::make_unique(OSABI); +} + +bool XtensaObjectWriter::needsRelocateWithSymbol(const MCSymbol &Sym, + unsigned Type) const { + return false; +} diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaFixupKinds.h b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaFixupKinds.h new file mode 100644 index 0000000000000..0bfd69a6cfb62 --- /dev/null +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaFixupKinds.h @@ -0,0 +1,31 @@ +//===-- XtensaMCFixups.h - Xtensa-specific fixup entries --------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_XTENSA_MCTARGETDESC_XTENSAMCFIXUPS_H +#define LLVM_LIB_TARGET_XTENSA_MCTARGETDESC_XTENSAMCFIXUPS_H + +#include "llvm/MC/MCFixup.h" + +namespace llvm { +namespace Xtensa { +enum FixupKind { + fixup_xtensa_branch_6 = FirstTargetFixupKind, + fixup_xtensa_branch_8, + fixup_xtensa_branch_12, + fixup_xtensa_jump_18, + fixup_xtensa_call_18, + fixup_xtensa_l32r_16, + fixup_xtensa_invalid, + LastTargetFixupKind, + NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind +}; +} // end namespace Xtensa +} // end namespace llvm + +#endif /* LLVM_LIB_TARGET_XTENSA_MCTARGETDESC_XTENSAMCFIXUPS_H */ diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp new file mode 100644 index 0000000000000..b6e5d69065edc --- /dev/null +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp @@ -0,0 +1,421 @@ +//===- XtensaInstPrinter.cpp - Convert Xtensa MCInst to asm syntax --------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This class prints an Xtensa MCInst to a .s file. +// +//===----------------------------------------------------------------------===// + +#include "XtensaInstPrinter.h" +#include "llvm/CodeGen/MachineOperand.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInstrInfo.h" +#include "llvm/MC/MCSymbol.h" +#include "llvm/Support/raw_ostream.h" + +using namespace llvm; + +#define DEBUG_TYPE "asm-printer" + +#include "XtensaGenAsmWriter.inc" + +void XtensaInstPrinter::printAddress(unsigned Base, int64_t Disp, + raw_ostream &O) { + O << Disp; + if (Base) { + O << '('; + O << getRegisterName(Base) << ')'; + } +} + +static void printExpr(const MCExpr *Expr, raw_ostream &OS) { + int Offset = 0; + const MCSymbolRefExpr *SRE; + + if (!(SRE = dyn_cast(Expr))) + assert(false && "Unexpected MCExpr type."); + + MCSymbolRefExpr::VariantKind Kind = SRE->getKind(); + + switch (Kind) { + case MCSymbolRefExpr::VK_None: + break; + // TODO + default: + llvm_unreachable("Invalid kind!"); + } + + OS << SRE->getSymbol(); + + if (Offset) { + if (Offset > 0) + OS << '+'; + OS << Offset; + } + + if (Kind != MCSymbolRefExpr::VK_None) + OS << ')'; +} + +void XtensaInstPrinter::printOperand(const MCOperand &MC, raw_ostream &O) { + if (MC.isReg()) + O << getRegisterName(MC.getReg()); + else if (MC.isImm()) + O << MC.getImm(); + else if (MC.isExpr()) + printExpr(MC.getExpr(), O); + else + llvm_unreachable("Invalid operand"); +} + +void XtensaInstPrinter::printInst(const MCInst *MI, raw_ostream &O, + StringRef Annot, const MCSubtargetInfo &STI) { + printInstruction(MI, O); + printAnnotation(O, Annot); +} + +void XtensaInstPrinter::printRegName(raw_ostream &O, unsigned RegNo) const { + O << getRegisterName(RegNo); +} + +void XtensaInstPrinter::printOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + printOperand(MI->getOperand(OpNum), O); +} + +void XtensaInstPrinter::printMemOperand(const MCInst *MI, int opNum, + raw_ostream &OS) { + OS << getRegisterName(MI->getOperand(opNum).getReg()); + OS << ", "; + printOperand(MI, opNum + 1, OS); +} + +void XtensaInstPrinter::printBranchTarget(const MCInst *MI, int opNum, + raw_ostream &OS) { + const MCOperand &MC = MI->getOperand(opNum); + if 
(MI->getOperand(opNum).isImm()) { + int64_t Val = MC.getImm() + 4; + OS << ". "; + if (Val > 0) + OS << '+'; + OS << Val; + } else if (MC.isExpr()) + MC.getExpr()->print(OS, &MAI, true); + else + llvm_unreachable("Invalid operand"); +} + +void XtensaInstPrinter::printJumpTarget(const MCInst *MI, int OpNum, + raw_ostream &OS) { + const MCOperand &MC = MI->getOperand(OpNum); + if (MC.isImm()) { + int64_t Val = MC.getImm() + 4; + OS << ". "; + if (Val > 0) + OS << '+'; + OS << Val; + } else if (MC.isExpr()) + MC.getExpr()->print(OS, &MAI, true); + else + llvm_unreachable("Invalid operand"); + ; +} + +void XtensaInstPrinter::printCallOperand(const MCInst *MI, int OpNum, + raw_ostream &OS) { + const MCOperand &MC = MI->getOperand(OpNum); + if (MC.isImm()) { + int64_t Val = MC.getImm() + 4; + OS << ". "; + if (Val > 0) + OS << '+'; + OS << Val; + } else if (MC.isExpr()) + MC.getExpr()->print(OS, &MAI, true); + else + llvm_unreachable("Invalid operand"); +} + +void XtensaInstPrinter::printL32RTarget(const MCInst *MI, int OpNum, + raw_ostream &O) { + const MCOperand &MC = MI->getOperand(OpNum); + if (MC.isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + int64_t InstrOff = Value & 0x3; + Value -= InstrOff; + assert((Value >= -262144 && Value <= -4) && + "Invalid argument, value must be in ranges [-262144,-4]"); + Value += ((InstrOff + 0x3) & 0x4) - InstrOff; + O << ". "; + O << Value; + } else if (MC.isExpr()) + MC.getExpr()->print(O, &MAI, true); + else + llvm_unreachable("Invalid operand"); +} + +void XtensaInstPrinter::printImm8_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -128 && Value <= 127) && + "Invalid argument, value must be in ranges [-128,127]"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printImm8_sh8_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -32768 && Value <= 32512 && ((Value & 0xFF) == 0)) && + "Invalid argument, value must be multiples of 256 in range " + "[-32768,32512]"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printImm12_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -2048 && Value <= 2047) && + "Invalid argument, value must be in ranges [-2048,2047]"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printImm12m_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -2048 && Value <= 2047) && + "Invalid argument, value must be in ranges [-2048,2047]"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printUimm4_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 0 && Value <= 15) && "Invalid argument"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printUimm5_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 0 && Value <= 31) && "Invalid argument"); + O << Value; + } else + 
printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printShimm1_31_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 1 && Value <= 31) && + "Invalid argument, value must be in range [1,31]"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printImm1_16_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 1 && Value <= 16) && + "Invalid argument, value must be in range [1,16]"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printImm1n_15_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -1 && (Value != 0) && Value <= 15) && + "Invalid argument, value must be in ranges <-1,-1> or <1,15>"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printImm32n_95_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -32 && Value <= 95) && + "Invalid argument, value must be in ranges <-32,95>"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printImm8n_7_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -8 && Value <= 7) && + "Invalid argument, value must be in ranges <-8,7>"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printImm64n_4n_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -64 && Value <= -4) & ((Value & 0x3) == 0) && + "Invalid argument, value must be in ranges <-64,-4>"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printOffset8m8_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 0 && Value <= 255) && + "Invalid argument, value must be in range [0,255]"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printOffset8m16_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 0 && Value <= 510 && ((Value & 0x1) == 0)) && + "Invalid argument, value must be multiples of two in range [0,510]"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printOffset8m32_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert( + (Value >= 0 && Value <= 1020 && ((Value & 0x3) == 0)) && + "Invalid argument, value must be multiples of four in range [0,1020]"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printOffset4m32_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 0 && Value <= 60 && ((Value & 0x3) == 0)) && + "Invalid argument, value must be multiples 
of four in range [0,60]"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printEntry_Imm12_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 0 && Value <= 32760) && + "Invalid argument, value must be multiples of eight in range " + "<0,32760>"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printB4const_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + + switch (Value) { + case -1: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + case 10: + case 12: + case 16: + case 32: + case 64: + case 128: + case 256: + break; + default: + assert((0) && "Invalid B4const argument"); + } + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printB4constu_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + + switch (Value) { + case 32768: + case 65536: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + case 10: + case 12: + case 16: + case 32: + case 64: + case 128: + case 256: + break; + default: + assert((0) && "Invalid B4constu argument"); + } + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printSeimm7_22_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 7 && Value <= 22) && + "Invalid argument, value must be in range <7,22>"); + O << Value; + } else + printOperand(MI, OpNum, O); +} diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h new file mode 100644 index 0000000000000..e06016bf8c101 --- /dev/null +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h @@ -0,0 +1,76 @@ +//===- XtensaInstPrinter.h - Convert Xtensa MCInst to asm syntax -*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This class prints an Xtensa MCInst to a .s file. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_XTENSA_MCTARGETDESC_XTENSAINSTPRINTER_H +#define LLVM_LIB_TARGET_XTENSA_MCTARGETDESC_XTENSAINSTPRINTER_H + +#include "llvm/MC/MCInstPrinter.h" +#include "llvm/Support/Compiler.h" + +namespace llvm { +class MCOperand; + +class XtensaInstPrinter : public MCInstPrinter { +public: + XtensaInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII, + const MCRegisterInfo &MRI) + : MCInstPrinter(MAI, MII, MRI) {} + + // Automatically generated by tblgen. + void printInstruction(const MCInst *MI, raw_ostream &O); + static const char *getRegisterName(unsigned RegNo); + + // Print the given operand. + static void printOperand(const MCOperand &MO, raw_ostream &O); + + // Print an address + static void printAddress(unsigned Base, int64_t Disp, raw_ostream &O); + + // Override MCInstPrinter. 
+ void printRegName(raw_ostream &O, unsigned RegNo) const override; + void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot, + const MCSubtargetInfo &STI) override; + +private: + // Print various types of operand. + void printOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printMemOperand(const MCInst *MI, int OpNUm, raw_ostream &O); + void printBranchTarget(const MCInst *MI, int OpNum, raw_ostream &O); + void printJumpTarget(const MCInst *MI, int OpNum, raw_ostream &O); + void printCallOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printL32RTarget(const MCInst *MI, int OpNum, raw_ostream &O); + + void printImm8_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printImm8_sh8_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printImm12_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printImm12m_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printUimm4_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printUimm5_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printShimm1_31_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printImm1_16_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printImm1n_15_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printImm32n_95_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printImm8n_7_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printImm64n_4n_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printOffset8m8_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printOffset8m16_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printOffset8m32_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printOffset4m32_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printEntry_Imm12_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printB4const_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printB4constu_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printSeimm7_22_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); +}; +} // end namespace llvm + +#endif /* LLVM_LIB_TARGET_XTENSA_MCTARGETDESC_XTENSAINSTPRINTER_H */ diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCAsmInfo.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCAsmInfo.cpp new file mode 100644 index 0000000000000..baf0a84eeea05 --- /dev/null +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCAsmInfo.cpp @@ -0,0 +1,31 @@ +//===-- XtensaMCAsmInfo.cpp - Xtensa Asm Properties -----------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the declarations of the XtensaMCAsmInfo properties. 
+// +//===----------------------------------------------------------------------===// + +#include "XtensaMCAsmInfo.h" +#include "llvm/ADT/Triple.h" + +using namespace llvm; + +XtensaMCAsmInfo::XtensaMCAsmInfo(const Triple &TT) { + CodePointerSize = 4; + CalleeSaveStackSlotSize = 4; + PrivateGlobalPrefix = ".L"; + CommentString = "#"; + ZeroDirective = "\t.space\t"; + Data64bitsDirective = "\t.quad\t"; + GlobalDirective = "\t.global\t"; + UsesELFSectionDirectiveForBSS = true; + SupportsDebugInformation = true; + ExceptionsType = ExceptionHandling::DwarfCFI; + AlignmentIsInBytes = false; +} diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCAsmInfo.h b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCAsmInfo.h new file mode 100644 index 0000000000000..827521f4ba17a --- /dev/null +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCAsmInfo.h @@ -0,0 +1,29 @@ +//===-- XtensaMCAsmInfo.h - Xtensa Asm Info --------------------*- C++ -*--===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the declaration of the XtensaMCAsmInfo class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_XTENSA_MCTARGETDESC_XTENSATARGETASMINFO_H +#define LLVM_LIB_TARGET_XTENSA_MCTARGETDESC_XTENSATARGETASMINFO_H + +#include "llvm/MC/MCAsmInfoELF.h" + +namespace llvm { +class Triple; + +class XtensaMCAsmInfo : public MCAsmInfoELF { +public: + explicit XtensaMCAsmInfo(const Triple &TT); +}; + +} // namespace llvm + +#endif /* LLVM_LIB_TARGET_XTENSA_MCTARGETDESC_XTENSATARGETASMINFO_H */ diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp new file mode 100644 index 0000000000000..b612bca46697c --- /dev/null +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp @@ -0,0 +1,559 @@ +//===-- XtensaMCCodeEmitter.cpp - Convert Xtensa Code to Machine Code -----===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the XtensaMCCodeEmitter class. +// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "mccodeemitter" +#include "MCTargetDesc/XtensaFixupKinds.h" +#include "MCTargetDesc/XtensaMCExpr.h" +#include "MCTargetDesc/XtensaMCTargetDesc.h" +#include "llvm/MC/MCCodeEmitter.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCInstrInfo.h" +#include "llvm/MC/MCRegisterInfo.h" + +#define GET_INSTRMAP_INFO +#include "XtensaGenInstrInfo.inc" +#undef GET_INSTRMAP_INFO + +using namespace llvm; + +namespace { +class XtensaMCCodeEmitter : public MCCodeEmitter { + const MCInstrInfo &MCII; + MCContext &Ctx; + bool IsLittleEndian; + +public: + XtensaMCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx, bool isLE) + : MCII(mcii), Ctx(ctx), IsLittleEndian(isLE) {} + + ~XtensaMCCodeEmitter() {} + + // OVerride MCCodeEmitter. + void encodeInstruction(const MCInst &MI, raw_ostream &OS, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const override; + +private: + // Automatically generated by TableGen. 
+ uint64_t getBinaryCodeForInstr(const MCInst &MI, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + // Called by the TableGen code to get the binary encoding of operand + // MO in MI. Fixups is the list of fixups against MI. + unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getJumpTargetEncoding(const MCInst &MI, unsigned int OpNum, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getBranchTargetEncoding(const MCInst &MI, unsigned int OpNum, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getCallEncoding(const MCInst &MI, unsigned int OpNum, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getL32RTargetEncoding(const MCInst &MI, unsigned OpNum, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getMemRegEncoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getImm8OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getImm8_sh8OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getImm12OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getUimm4OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getUimm5OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getImm1_16OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getImm1n_15OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getImm32n_95OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getImm8n_7OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getImm64n_4nOpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getEntry_Imm12OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getShimm1_31OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getB4constOpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getB4constuOpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + unsigned getShimmSeimm7_22OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; +}; +} // namespace + +MCCodeEmitter *llvm::createXtensaMCCodeEmitter(const MCInstrInfo &MCII, + const MCRegisterInfo &MRI, + MCContext &Ctx) { + return new XtensaMCCodeEmitter(MCII, Ctx, true); +} + +void XtensaMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + uint64_t Bits = getBinaryCodeForInstr(MI, Fixups, STI); + unsigned Size = MCII.get(MI.getOpcode()).getSize(); + + if (IsLittleEndian) { + // Little-endian insertion of Size bytes. 
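+    // For example, a 3-byte instruction encoded as 0x112233 is written to
+    // the stream as the byte sequence 0x33, 0x22, 0x11.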
+ unsigned ShiftValue = 0; + for (unsigned I = 0; I != Size; ++I) { + OS << uint8_t(Bits >> ShiftValue); + ShiftValue += 8; + } + } else { + // TODO Big-endian insertion of Size bytes. + llvm_unreachable("Big-endian mode currently is not supported!"); + } +} + +unsigned +XtensaMCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + if (MO.isReg()) + return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()); + if (MO.isImm()) { + long res = static_cast(MO.getImm()); + return res; + } + + llvm_unreachable("Unhandled expression!"); + return 0; +} + +unsigned +XtensaMCCodeEmitter::getJumpTargetEncoding(const MCInst &MI, unsigned int OpNum, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNum); + + if (MO.isImm()) + return MO.getImm(); + + const MCExpr *Expr = MO.getExpr(); + Fixups.push_back(MCFixup::create( + 0, Expr, MCFixupKind(Xtensa::fixup_xtensa_jump_18), MI.getLoc())); + return 0; +} + +unsigned XtensaMCCodeEmitter::getBranchTargetEncoding( + const MCInst &MI, unsigned int OpNum, SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNum); + if (MO.isImm()) + return static_cast(MO.getImm()); + + const MCExpr *Expr = MO.getExpr(); + switch (MI.getOpcode()) { + case Xtensa::BEQZ: + case Xtensa::BGEZ: + case Xtensa::BLTZ: + case Xtensa::BNEZ: + Fixups.push_back(MCFixup::create( + 0, Expr, MCFixupKind(Xtensa::fixup_xtensa_branch_12), MI.getLoc())); + return 0; + default: + Fixups.push_back(MCFixup::create( + 0, Expr, MCFixupKind(Xtensa::fixup_xtensa_branch_8), MI.getLoc())); + return 0; + } +} + +unsigned +XtensaMCCodeEmitter::getCallEncoding(const MCInst &MI, unsigned int OpNum, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNum); + if (MO.isImm()) { + int32_t Res = MO.getImm(); + if (Res & 0x3) { + llvm_unreachable("Unexpected operand value!"); + } + Res >>= 2; + return Res; + } + + assert((MO.isExpr()) && "Unexpected operand value!"); + const MCExpr *Expr = MO.getExpr(); + Fixups.push_back(MCFixup::create( + 0, Expr, MCFixupKind(Xtensa::fixup_xtensa_call_18), MI.getLoc())); + return 0; +} + +unsigned +XtensaMCCodeEmitter::getL32RTargetEncoding(const MCInst &MI, unsigned OpNum, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNum); + if (MO.isImm()) { + int32_t Res = MO.getImm(); + // We don't check first 2 bits, because in these bits we could store first 2 + // bits of instruction address + Res >>= 2; + return Res; + } + + assert((MO.isExpr()) && "Unexpected operand value!"); + + Fixups.push_back(MCFixup::create( + 0, MO.getExpr(), MCFixupKind(Xtensa::fixup_xtensa_l32r_16), MI.getLoc())); + return 0; +} + +unsigned +XtensaMCCodeEmitter::getMemRegEncoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + assert(MI.getOperand(OpNo + 1).isImm()); + + long Res = static_cast(MI.getOperand(OpNo + 1).getImm()); + + switch (MI.getOpcode()) { + case Xtensa::S16I: + case Xtensa::L16SI: + case Xtensa::L16UI: + if (Res & 0x1) { + llvm_unreachable("Unexpected operand value!"); + } + Res >>= 1; + break; + case Xtensa::S32I: + case Xtensa::L32I: + case Xtensa::S32I_N: + case Xtensa::L32I_N: + case Xtensa::S32F: + case Xtensa::L32F: + case Xtensa::S32C1I: + if (Res & 0x3) { + llvm_unreachable("Unexpected operand value!"); + } + Res >>= 2; + break; + } + + 
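+  // The scaled offset is packed into bits [11:4] of the returned value and
+  // the base register encoding into bits [3:0].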
unsigned OffBits = Res << 4; + unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups, STI); + + return ((OffBits & 0xFF0) | RegBits); +} + +unsigned XtensaMCCodeEmitter::getImm8OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + int32_t Res = MO.getImm(); + + assert(((Res >= -128) && (Res <= 127)) && "Unexpected operand value!"); + + return (Res & 0xff); +} + +unsigned +XtensaMCCodeEmitter::getImm8_sh8OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + int32_t Res = MO.getImm(); + + assert(((Res >= -32768) && (Res <= 32512) && ((Res & 0xff) == 0)) && + "Unexpected operand value!"); + + return (Res & 0xffff); +} + +unsigned +XtensaMCCodeEmitter::getImm12OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + int32_t Res = MO.getImm(); + + assert(((Res >= -2048) && (Res <= 2047)) && "Unexpected operand value!"); + + return (Res & 0xfff); +} + +unsigned +XtensaMCCodeEmitter::getUimm4OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + long Res = static_cast(MO.getImm()); + + assert(((Res >= 0) && (Res <= 15)) && "Unexpected operand value!"); + + return Res & 0xf; +} + +unsigned +XtensaMCCodeEmitter::getUimm5OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + long Res = static_cast(MO.getImm()); + + assert(((Res >= 0) && (Res <= 31)) && "Unexpected operand value!"); + + return (Res & 0x1f); +} + +unsigned +XtensaMCCodeEmitter::getShimm1_31OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + long Res = static_cast(MO.getImm()); + + assert(((Res >= 1) && (Res <= 31)) && "Unexpected operand value!"); + + return ((32 - Res) & 0x1f); +} + +unsigned +XtensaMCCodeEmitter::getImm1_16OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + long Res = static_cast(MO.getImm()); + + assert(((Res >= 1) && (Res <= 16)) && "Unexpected operand value!"); + + return (Res - 1); +} + +unsigned +XtensaMCCodeEmitter::getImm1n_15OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + long Res = static_cast(MO.getImm()); + + assert(((Res >= -1) && (Res <= 15) && (Res != 0)) && + "Unexpected operand value!"); + + if (Res < 0) + Res = 0; + + return Res; +} + +unsigned +XtensaMCCodeEmitter::getImm32n_95OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + long Res = static_cast(MO.getImm()); + + assert(((Res >= -32) && (Res <= 95)) && "Unexpected operand value!"); + + return Res; +} + +unsigned +XtensaMCCodeEmitter::getImm8n_7OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + long Res = static_cast(MO.getImm()); + + assert(((Res >= -8) && (Res <= 7)) && "Unexpected operand value!"); + + if (Res < 0) + return Res + 16; + + return 
Res; +} + +unsigned +XtensaMCCodeEmitter::getImm64n_4nOpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + long Res = static_cast(MO.getImm()); + + assert(((Res >= -64) && (Res <= -4) && ((Res & 0x3) == 0)) && + "Unexpected operand value!"); + + return Res & 0x3f; +} + +unsigned +XtensaMCCodeEmitter::getEntry_Imm12OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + long res = static_cast(MO.getImm()); + + assert(((res & 0x7) == 0) && "Unexpected operand value!"); + + return res; +} + +unsigned +XtensaMCCodeEmitter::getB4constOpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + unsigned long Res = static_cast(MO.getImm()); + + switch (Res) { + case 0xffffffff: + Res = 0; + break; + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + break; + case 10: + Res = 9; + break; + case 12: + Res = 10; + break; + case 16: + Res = 11; + break; + case 32: + Res = 12; + break; + case 64: + Res = 13; + break; + case 128: + Res = 14; + break; + case 256: + Res = 15; + break; + default: + llvm_unreachable("Unexpected operand value!"); + } + + return Res; +} + +unsigned +XtensaMCCodeEmitter::getB4constuOpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + long Res = static_cast(MO.getImm()); + + switch (Res) { + case 32768: + Res = 0; + break; + case 65536: + Res = 1; + break; + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + break; + case 10: + Res = 9; + break; + case 12: + Res = 10; + break; + case 16: + Res = 11; + break; + case 32: + Res = 12; + break; + case 64: + Res = 13; + break; + case 128: + Res = 14; + break; + case 256: + Res = 15; + break; + default: + llvm_unreachable("Unexpected operand value!"); + } + + return Res; +} + +unsigned XtensaMCCodeEmitter::getShimmSeimm7_22OpValue( + const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + long res = static_cast(MO.getImm()); + + res -= 7; + assert(((res & 0xf) == res) && "Unexpected operand value!"); + + return res; +} + +#include "XtensaGenMCCodeEmitter.inc" diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCExpr.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCExpr.cpp new file mode 100644 index 0000000000000..5baf0d5a1f5d7 --- /dev/null +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCExpr.cpp @@ -0,0 +1,62 @@ +//===-- XtensaMCExpr.cpp - Xtensa specific MC expression classes ----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file contains the implementation of the assembly expression modifiers +// accepted by the Xtensa architecture +// +//===----------------------------------------------------------------------===// + +#include "XtensaMCExpr.h" +#include "llvm/MC/MCAssembler.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/MC/MCSymbolELF.h" +#include "llvm/MC/MCValue.h" +#include "llvm/Object/ELF.h" +#include "llvm/Support/ErrorHandling.h" + +using namespace llvm; + +#define DEBUG_TYPE "xtensamcexpr" + +const XtensaMCExpr *XtensaMCExpr::create(const MCExpr *Expr, VariantKind Kind, + MCContext &Ctx) { + return new (Ctx) XtensaMCExpr(Expr, Kind); +} + +void XtensaMCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const { + bool HasVariant = getKind() != VK_Xtensa_None; + if (HasVariant) + OS << '%' << getVariantKindName(getKind()) << '('; + Expr->print(OS, MAI); + if (HasVariant) + OS << ')'; +} + +bool XtensaMCExpr::evaluateAsRelocatableImpl(MCValue &Res, + const MCAsmLayout *Layout, + const MCFixup *Fixup) const { + return getSubExpr()->evaluateAsRelocatable(Res, Layout, Fixup); +} + +void XtensaMCExpr::visitUsedExpr(MCStreamer &Streamer) const { + Streamer.visitUsedExpr(*getSubExpr()); +} + +XtensaMCExpr::VariantKind XtensaMCExpr::getVariantKindForName(StringRef name) { + return StringSwitch(name).Default( + VK_Xtensa_Invalid); +} + +StringRef XtensaMCExpr::getVariantKindName(VariantKind Kind) { + switch (Kind) { + default: + llvm_unreachable("Invalid ELF symbol kind"); + } +} diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCExpr.h b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCExpr.h new file mode 100644 index 0000000000000..6bdf048106634 --- /dev/null +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCExpr.h @@ -0,0 +1,57 @@ +//===-- XtensaMCExpr.h - Xtensa specific MC expression classes --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file describes Xtensa-specific MCExprs +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_Xtensa_MCTARGETDESC_XtensaMCEXPR_H +#define LLVM_LIB_TARGET_Xtensa_MCTARGETDESC_XtensaMCEXPR_H + +#include "llvm/MC/MCExpr.h" + +namespace llvm { + +class StringRef; +class XtensaMCExpr : public MCTargetExpr { +public: + enum VariantKind { VK_Xtensa_None, VK_Xtensa_Invalid }; + +private: + const MCExpr *Expr; + const VariantKind Kind; + + explicit XtensaMCExpr(const MCExpr *Expr, VariantKind Kind) + : Expr(Expr), Kind(Kind) {} + +public: + static const XtensaMCExpr *create(const MCExpr *Expr, VariantKind Kind, + MCContext &Ctx); + + VariantKind getKind() const { return Kind; } + + const MCExpr *getSubExpr() const { return Expr; } + + void printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const override; + bool evaluateAsRelocatableImpl(MCValue &Res, const MCAsmLayout *Layout, + const MCFixup *Fixup) const override; + void visitUsedExpr(MCStreamer &Streamer) const override; + MCFragment *findAssociatedFragment() const override { + return getSubExpr()->findAssociatedFragment(); + } + + void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const override {} + + static VariantKind getVariantKindForName(StringRef name); + static StringRef getVariantKindName(VariantKind Kind); +}; + +} // end namespace llvm. + +#endif diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.cpp new file mode 100644 index 0000000000000..37e00cd7261b3 --- /dev/null +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.cpp @@ -0,0 +1,113 @@ +//===-- XtensaMCTargetDesc.cpp - Xtebsa target descriptions ---------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +#include "XtensaMCTargetDesc.h" +#include "XtensaInstPrinter.h" +#include "XtensaMCAsmInfo.h" +#include "XtensaTargetStreamer.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/MC/MCAsmInfo.h" +#include "llvm/MC/MCDwarf.h" +#include "llvm/MC/MCInstrInfo.h" +#include "llvm/MC/MCRegisterInfo.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/TargetRegistry.h" + +#define GET_INSTRINFO_MC_DESC +#include "XtensaGenInstrInfo.inc" + +#define GET_REGINFO_MC_DESC +#include "XtensaGenRegisterInfo.inc" + +#define GET_SUBTARGETINFO_MC_DESC +#include "XtensaGenSubtargetInfo.inc" + +using namespace llvm; + +static MCAsmInfo *createXtensaMCAsmInfo(const MCRegisterInfo &MRI, + const Triple &TT) { + MCAsmInfo *MAI = new XtensaMCAsmInfo(TT); + MCCFIInstruction Inst = MCCFIInstruction::createDefCfa( + nullptr, MRI.getDwarfRegNum(Xtensa::SP, true), 0); + MAI->addInitialFrameState(Inst); + return MAI; +} + +static MCInstrInfo *createXtensaMCInstrInfo() { + MCInstrInfo *X = new MCInstrInfo(); + InitXtensaMCInstrInfo(X); + return X; +} + +static MCInstPrinter *createXtensaMCInstPrinter(const Triple &TT, + unsigned SyntaxVariant, + const MCAsmInfo &MAI, + const MCInstrInfo &MII, + const MCRegisterInfo &MRI) { + return new XtensaInstPrinter(MAI, MII, MRI); +} + +static MCRegisterInfo *createXtensaMCRegisterInfo(const Triple &TT) { + MCRegisterInfo *X = new MCRegisterInfo(); + InitXtensaMCRegisterInfo(X, Xtensa::A0); + return X; +} + +static MCSubtargetInfo * +createXtensaMCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) { + return createXtensaMCSubtargetInfoImpl(TT, CPU, FS); +} + +static MCTargetStreamer * +createXtensaAsmTargetStreamer(MCStreamer &S, formatted_raw_ostream &OS, + MCInstPrinter *InstPrint, bool isVerboseAsm) { + return new XtensaTargetAsmStreamer(S, OS); +} + +static MCTargetStreamer * +createXtensaObjectTargetStreamer(MCStreamer &S, const MCSubtargetInfo &STI) { + return new XtensaTargetELFStreamer(S); +} + +extern "C" void LLVMInitializeXtensaTargetMC() { + // Register the MCAsmInfo. + TargetRegistry::RegisterMCAsmInfo(TheXtensaTarget, createXtensaMCAsmInfo); + + // Register the MCCodeEmitter. + TargetRegistry::RegisterMCCodeEmitter(TheXtensaTarget, + createXtensaMCCodeEmitter); + + // Register the MCInstrInfo. + TargetRegistry::RegisterMCInstrInfo(TheXtensaTarget, createXtensaMCInstrInfo); + + // Register the MCInstPrinter. + TargetRegistry::RegisterMCInstPrinter(TheXtensaTarget, + createXtensaMCInstPrinter); + + // Register the MCRegisterInfo. + TargetRegistry::RegisterMCRegInfo(TheXtensaTarget, + createXtensaMCRegisterInfo); + + // Register the MCSubtargetInfo. + TargetRegistry::RegisterMCSubtargetInfo(TheXtensaTarget, + createXtensaMCSubtargetInfo); + + // Register the MCAsmBackend. + TargetRegistry::RegisterMCAsmBackend(TheXtensaTarget, + createXtensaMCAsmBackend); + + // Register the asm target streamer. + TargetRegistry::RegisterAsmTargetStreamer(TheXtensaTarget, + createXtensaAsmTargetStreamer); + + // Register the ELF target streamer. 
+ TargetRegistry::RegisterObjectTargetStreamer( + TheXtensaTarget, createXtensaObjectTargetStreamer); +} diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.h b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.h new file mode 100644 index 0000000000000..23fd1b77fd481 --- /dev/null +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.h @@ -0,0 +1,59 @@ +//===-- XtensaMCTargetDesc.h - Xtensa Target Descriptions -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file provides Xtensa specific target descriptions. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_XTENSA_MCTARGETDESC_XTENSAMCTARGETDESC_H +#define LLVM_LIB_TARGET_XTENSA_MCTARGETDESC_XTENSAMCTARGETDESC_H +#include "llvm/Support/DataTypes.h" +#include "llvm/Support/TargetRegistry.h" + +namespace llvm { + +class MCAsmBackend; +class MCCodeEmitter; +class MCContext; +class MCInstrInfo; +class MCObjectTargetWriter; +class MCObjectWriter; +class MCRegisterInfo; +class MCSubtargetInfo; +class StringRef; +class Target; +class raw_ostream; + +extern Target TheXtensaTarget; + +MCCodeEmitter *createXtensaMCCodeEmitter(const MCInstrInfo &MCII, + const MCRegisterInfo &MRI, + MCContext &Ctx); + +MCAsmBackend *createXtensaMCAsmBackend(const Target &T, + const MCSubtargetInfo &STI, + const MCRegisterInfo &MRI, + const MCTargetOptions &Options); +std::unique_ptr +createXtensaObjectWriter(uint8_t OSABI, bool IsLittleEndian); +} // end namespace llvm + +// Defines symbolic names for Xtensa registers. +// This defines a mapping from register name to register number. +#define GET_REGINFO_ENUM +#include "XtensaGenRegisterInfo.inc" + +// Defines symbolic names for the Xtensa instructions. +#define GET_INSTRINFO_ENUM +#include "XtensaGenInstrInfo.inc" + +#define GET_SUBTARGETINFO_ENUM +#include "XtensaGenSubtargetInfo.inc" + +#endif /* LLVM_LIB_TARGET_XTENSA_MCTARGETDESC_XTENSAMCTARGETDESC_H */ diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.cpp new file mode 100644 index 0000000000000..9971325862525 --- /dev/null +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.cpp @@ -0,0 +1,96 @@ +//===-- XtensaTargetStreamer.cpp - Xtensa Target Streamer Methods---------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===--------------------------------------------------------------------===// +// +// This file provides Xtensa specific target streamer methods. 
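+// Two implementations are provided: XtensaTargetAsmStreamer, which prints
+// .literal text for assembly output, and XtensaTargetELFStreamer, which
+// emits literals into per-function .literal sections of the object file.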
+// +//===--------------------------------------------------------------------===// + +#include "XtensaTargetStreamer.h" +#include "XtensaInstPrinter.h" +#include "llvm/BinaryFormat/ELF.h" +#include "llvm/MC/MCAssembler.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCObjectFileInfo.h" +#include "llvm/MC/MCSectionELF.h" +#include "llvm/Support/FormattedStream.h" + +using namespace llvm; + +XtensaTargetStreamer::XtensaTargetStreamer(MCStreamer &S) + : MCTargetStreamer(S) {} + +XtensaTargetAsmStreamer::XtensaTargetAsmStreamer(MCStreamer &S, + formatted_raw_ostream &OS) + : XtensaTargetStreamer(S), OS(OS) {} + +void XtensaTargetAsmStreamer::emitLiteral(std::string str) { OS << str; } + +XtensaTargetELFStreamer::XtensaTargetELFStreamer(MCStreamer &S) + : XtensaTargetStreamer(S) {} + +void XtensaTargetELFStreamer::emitLiteralLabel(MCSymbol *LblSym, SMLoc L) { + MCContext &Context = getStreamer().getContext(); + MCStreamer &OutStreamer = getStreamer(); + MCSectionELF *CS = (MCSectionELF *)OutStreamer.getCurrentSectionOnly(); + std::string CSectionName = CS->getSectionName(); + std::size_t Pos = CSectionName.find(".text"); + std::string SectionName; + if (Pos != std::string::npos) { + SectionName = ".literal"; + SectionName += CSectionName.substr(Pos); + } else { + SectionName = CSectionName; + SectionName += ".literal"; + } + + MCSection *ConstSection = Context.getELFSection( + SectionName, ELF::SHT_PROGBITS, ELF::SHF_EXECINSTR | ELF::SHF_ALLOC); + ConstSection->setAlignment(Align(4)); + + OutStreamer.PushSection(); + OutStreamer.SwitchSection(ConstSection); + OutStreamer.EmitLabel(LblSym, L); + OutStreamer.PopSection(); +} + +void XtensaTargetELFStreamer::emitLiteral(MCSymbol *LblSym, const MCExpr *Value, + SMLoc L) { + MCStreamer &OutStreamer = getStreamer(); + + OutStreamer.EmitLabel(LblSym, L); + OutStreamer.EmitValue(Value, 4, L); +} + +void XtensaTargetELFStreamer::emitLiteral(const MCExpr *Value, SMLoc L) { + MCContext &Context = getStreamer().getContext(); + MCStreamer &OutStreamer = getStreamer(); + MCSectionELF *CS = (MCSectionELF *)OutStreamer.getCurrentSectionOnly(); + std::string CSectionName = CS->getSectionName(); + std::size_t Pos = CSectionName.find(".text"); + std::string SectionName; + if (Pos != std::string::npos) { + SectionName = ".literal"; + SectionName += CSectionName.substr(Pos); + } else { + SectionName = CSectionName; + SectionName += ".literal"; + } + + MCSection *ConstSection = Context.getELFSection( + SectionName, ELF::SHT_PROGBITS, ELF::SHF_EXECINSTR | ELF::SHF_ALLOC); + + OutStreamer.PushSection(); + OutStreamer.SwitchSection(ConstSection); + OutStreamer.EmitValue(Value, 4, L); + OutStreamer.PopSection(); +} + +MCELFStreamer &XtensaTargetELFStreamer::getStreamer() { + return static_cast(Streamer); +} diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.h b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.h new file mode 100644 index 0000000000000..d36820f5dcf68 --- /dev/null +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.h @@ -0,0 +1,50 @@ +//===-- XtensaTargetStreamer.h - Xtensa Target Streamer -------*- C++ -*--===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===---------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_XTENSA_XTENSATARGETSTREAMER_H +#define LLVM_LIB_TARGET_XTENSA_XTENSATARGETSTREAMER_H + +#include "XtensaConstantPoolValue.h" +#include "llvm/MC/MCELFStreamer.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/Support/SMLoc.h" + +namespace llvm { +class XtensaTargetStreamer : public MCTargetStreamer { +public: + XtensaTargetStreamer(MCStreamer &S); + virtual void emitLiteral(MCSymbol *LblSym, const MCExpr *Value, SMLoc L) = 0; + virtual void emitLiteralLabel(MCSymbol *LblSym, SMLoc L) = 0; + virtual void emitLiteral(const MCExpr *Value, SMLoc L) = 0; + virtual void emitLiteral(std::string str) = 0; +}; + +class XtensaTargetAsmStreamer : public XtensaTargetStreamer { + formatted_raw_ostream &OS; + +public: + XtensaTargetAsmStreamer(MCStreamer &S, formatted_raw_ostream &OS); + void emitLiteral(MCSymbol *LblSym, const MCExpr *Value, SMLoc L) override {} + void emitLiteralLabel(MCSymbol *LblSym, SMLoc L) override {} + void emitLiteral(const MCExpr *Value, SMLoc L) override {} + void emitLiteral(std::string str) override; +}; + +class XtensaTargetELFStreamer : public XtensaTargetStreamer { +public: + XtensaTargetELFStreamer(MCStreamer &S); + MCELFStreamer &getStreamer(); + void emitLiteral(MCSymbol *LblSym, const MCExpr *Value, SMLoc L) override; + void emitLiteralLabel(MCSymbol *LblSym, SMLoc L) override; + void emitLiteral(const MCExpr *Value, SMLoc L) override; + void emitLiteral(std::string str) override {} +}; +} // end namespace llvm + +#endif diff --git a/llvm/lib/Target/Xtensa/TargetInfo/CMakeLists.txt b/llvm/lib/Target/Xtensa/TargetInfo/CMakeLists.txt new file mode 100644 index 0000000000000..a43483560153e --- /dev/null +++ b/llvm/lib/Target/Xtensa/TargetInfo/CMakeLists.txt @@ -0,0 +1,5 @@ +include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/.. ) + +add_llvm_library(LLVMXtensaInfo + XtensaTargetInfo.cpp + ) diff --git a/llvm/lib/Target/Xtensa/TargetInfo/LLVMBuild.txt b/llvm/lib/Target/Xtensa/TargetInfo/LLVMBuild.txt new file mode 100644 index 0000000000000..ce837c1607d3a --- /dev/null +++ b/llvm/lib/Target/Xtensa/TargetInfo/LLVMBuild.txt @@ -0,0 +1,16 @@ +;===------------------------------------------------------------------------===; +; +; This is an LLVMBuild description file for the components in this subdirectory. +; +; For more information on the LLVMBuild system, please see: +; +; http://llvm.org/docs/LLVMBuild.html +; +;===------------------------------------------------------------------------===; + +[component_0] +type = Library +name = XtensaInfo +parent = Xtensa +required_libraries = Support +add_to_library_groups = Xtensa diff --git a/llvm/lib/Target/Xtensa/TargetInfo/XtensaTargetInfo.cpp b/llvm/lib/Target/Xtensa/TargetInfo/XtensaTargetInfo.cpp new file mode 100644 index 0000000000000..ec30e3968a5ef --- /dev/null +++ b/llvm/lib/Target/Xtensa/TargetInfo/XtensaTargetInfo.cpp @@ -0,0 +1,19 @@ +//===-- XtensaTargetInfo.cpp - Xtensa Target Implementation ---------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +#include "llvm/Support/TargetRegistry.h" + +using namespace llvm; +namespace llvm { +Target TheXtensaTarget; +} +extern "C" void LLVMInitializeXtensaTargetInfo() { + RegisterTarget X(TheXtensaTarget, "xtensa", "Xtensa 32", + "XTENSA"); +} diff --git a/llvm/lib/Target/Xtensa/Xtensa.h b/llvm/lib/Target/Xtensa/Xtensa.h new file mode 100644 index 0000000000000..e5dc1f9030d73 --- /dev/null +++ b/llvm/lib/Target/Xtensa/Xtensa.h @@ -0,0 +1,30 @@ +//===-- Xtensa.h - Top-level interface for Xtensa representation ----*- C++ +//-*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===--------------------------------------------------------------------------===// +// +// This file contains the entry points for global functions defined in +// the LLVM Xtensa back-end. +// +//===--------------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_XTENSA_XTENSA_H +#define LLVM_LIB_TARGET_XTENSA_XTENSA_H + +#include "MCTargetDesc/XtensaMCTargetDesc.h" +#include "llvm/PassRegistry.h" + +namespace llvm { +class XtensaTargetMachine; +class FunctionPass; + +FunctionPass *createXtensaISelDag(XtensaTargetMachine &TM, + CodeGenOpt::Level OptLevel); +FunctionPass *createXtensaSizeReductionPass(); +} // namespace llvm +#endif /* LLVM_LIB_TARGET_XTENSA_XTENSA_H */ diff --git a/llvm/lib/Target/Xtensa/Xtensa.td b/llvm/lib/Target/Xtensa/Xtensa.td new file mode 100644 index 0000000000000..81946aa87112b --- /dev/null +++ b/llvm/lib/Target/Xtensa/Xtensa.td @@ -0,0 +1,136 @@ +//===- Xtensa.td - Describe the Xtensa Target Machine -----------*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===---------------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Target-independent interfaces +//===----------------------------------------------------------------------===// + +include "llvm/Target/Target.td" + +//===----------------------------------------------------------------------===// +// Subtarget Features. 
+//===----------------------------------------------------------------------===// +def FeatureDensity : SubtargetFeature<"density", "HasDensity", "true", + "Enable Density instructions">; +def HasDensity : Predicate<"Subtarget->hasDensity()">, + AssemblerPredicate<"FeatureDensity">; + +def FeatureSingleFloat : SubtargetFeature<"fp", "HasSingleFloat", "true", + "Enable Xtensa Single FP instructions">; +def HasSingleFloat : Predicate<"Subtarget->hasSingleFloat()">, + AssemblerPredicate<"FeatureSingleFloat">; + +def FeatureLoop : SubtargetFeature<"loop", "HasLoop", "true", + "Enable Xtensa Loop extension">; +def HasLoop : Predicate<"Subtarget->hasLoop()">, + AssemblerPredicate<"FeatureLoop">; + +def FeatureMAC16 : SubtargetFeature<"mac16", "HasMAC16", "true", + "Enable Xtensa MAC16 instructions">; +def HasMAC16 : Predicate<"Subtarget->hasMAC16()">, + AssemblerPredicate<"FeatureMAC16">; + +def FeatureWindowed : SubtargetFeature<"windowed", "HasWindowed", "true", + "Enable Xtensa Windowed Register option">; +def HasWindowed : Predicate<"Subtarget->hasWindowed()">, + AssemblerPredicate<"FeatureWindowed">; + +def FeatureBoolean : SubtargetFeature<"bool", "HasBoolean", "true", + "Enable Xtensa Boolean extension">; +def HasBoolean : Predicate<"Subtarget->hasBoolean()">, + AssemblerPredicate<"FeatureBoolean">; + +def FeatureSEXT : SubtargetFeature<"sext", "HasSEXT", "true", + "Enable Xtensa Sign Extend option">; +def HasSEXT : Predicate<"Subtarget->hasSEXT()">, + AssemblerPredicate<"FeatureSEXT">; + +def FeatureNSA : SubtargetFeature<"nsa", "HasNSA", "true", + "Enable Xtensa NSA option">; +def HasNSA : Predicate<"Subtarget->hasNSA()">, + AssemblerPredicate<"FeatureNSA">; + +def FeatureMul32 : SubtargetFeature<"mul32", "HasMul32", "true", + "Enable Xtensa Mul32 option">; +def HasMul32 : Predicate<"Subtarget->hasMul32()">, + AssemblerPredicate<"FeatureMul32">; + +def FeatureMul32High : SubtargetFeature<"mul32high", "HasMul32High", "true", + "Enable Xtensa Mul32High option">; +def HasMul32High : Predicate<"Subtarget->hasMul32High()">, + AssemblerPredicate<"FeatureMul32High">; + +def FeatureDiv32 : SubtargetFeature<"div32", "HasDiv32", "true", + "Enable Xtensa Div32 option">; +def HasDiv32 : Predicate<"Subtarget->hasDiv32()">, + AssemblerPredicate<"FeatureDiv32">; + +def FeatureS32C1I : SubtargetFeature<"s32c1i", "HasS32C1I", "true", + "Enable Xtensa S32C1I option">; +def HasS32C1I : Predicate<"Subtarget->hasS32C1I()">, + AssemblerPredicate<"FeatureS32C1I">; + +def FeatureTHREADPTR : SubtargetFeature<"threadptr", "HasTHREADPTR", "true", + "Enable Xtensa THREADPTR option">; +def HasTHREADPTR : Predicate<"Subtarget->hasTHREADPTR()">, + AssemblerPredicate<"FeatureTHREADPTR">; + +//===----------------------------------------------------------------------===// +// Xtensa supported processors. 
+//===----------------------------------------------------------------------===// +class Proc Features> + : Processor; + +def : Proc<"generic", []>; +def : Proc<"esp32", [FeatureDensity, FeatureSingleFloat, FeatureLoop, FeatureMAC16, FeatureWindowed, FeatureBoolean, + FeatureSEXT, FeatureNSA, FeatureMul32, FeatureMul32High, FeatureS32C1I, FeatureTHREADPTR, FeatureDiv32]>; +def : Proc<"esp8266", [FeatureDensity, FeatureNSA, FeatureMul32]>; +def : Proc<"esp32-S2", [FeatureDensity, FeatureWindowed, FeatureSEXT, FeatureNSA, FeatureMul32, FeatureMul32High, FeatureTHREADPTR, FeatureDiv32]>; + +//===----------------------------------------------------------------------===// +// Register File Description +//===----------------------------------------------------------------------===// + +include "XtensaRegisterInfo.td" + +//===----------------------------------------------------------------------===// +// Calling Convention Description +//===----------------------------------------------------------------------===// + +include "XtensaCallingConv.td" + +//===----------------------------------------------------------------------===// +// Instruction Descriptions +//===----------------------------------------------------------------------===// + +include "XtensaInstrInfo.td" + +def XtensaInstrInfo : InstrInfo; + +//===----------------------------------------------------------------------===// +// Target Declaration +//===----------------------------------------------------------------------===// + +def XtensaAsmParser : AsmParser { + let ShouldEmitMatchRegisterAltName = 1; +} + +def XtensaInstPrinter : AsmWriter +{ + string AsmWriterClassName = "InstPrinter"; + bit isMCAsmWriter = 1; +} + +def Xtensa : Target +{ + let InstructionSet = XtensaInstrInfo; + let AssemblyWriters = [XtensaInstPrinter]; + let AssemblyParsers = [XtensaAsmParser]; +} + diff --git a/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp b/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp new file mode 100644 index 0000000000000..6b6a5e79570dc --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp @@ -0,0 +1,276 @@ +//===- XtensaAsmPrinter.cpp Xtensa LLVM Assembly Printer ------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains a printer that converts from our internal representation +// of machine-dependent LLVM code to GAS-format Xtensa assembly language. 
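+// Besides the usual instruction lowering, it overrides EmitConstantPool and
+// EmitMachineConstantPoolValue so that pool entries end up in Xtensa
+// .literal sections (or as .literal directives when emitting textual
+// assembly).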
+// +//===----------------------------------------------------------------------===// + +#include "XtensaAsmPrinter.h" +#include "MCTargetDesc/XtensaInstPrinter.h" +#include "XtensaConstantPoolValue.h" +#include "XtensaMCInstLower.h" +#include "llvm/BinaryFormat/ELF.h" +#include "llvm/CodeGen/MachineModuleInfoImpls.h" +#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInstBuilder.h" +#include "llvm/MC/MCSectionELF.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/MC/MCSymbol.h" +#include "llvm/MC/MCSymbolELF.h" +#include "llvm/Support/TargetRegistry.h" + +using namespace llvm; + +static MCSymbolRefExpr::VariantKind +getModifierVariantKind(XtensaCP::XtensaCPModifier Modifier) { + switch (Modifier) { + case XtensaCP::no_modifier: + return MCSymbolRefExpr::VK_None; + case XtensaCP::TPOFF: + return MCSymbolRefExpr::VK_TPOFF; + } + llvm_unreachable("Invalid XtensaCPModifier!"); +} + +void XtensaAsmPrinter::EmitInstruction(const MachineInstr *MI) { + XtensaMCInstLower Lower(MF->getContext(), *this); + MCInst LoweredMI; + unsigned Opc = MI->getOpcode(); + + switch (Opc) { + case Xtensa::BR_JT: { + EmitToStreamer( + *OutStreamer, + MCInstBuilder(Xtensa::JX).addReg(MI->getOperand(0).getReg())); + return; + } + } + Lower.lower(MI, LoweredMI); + EmitToStreamer(*OutStreamer, LoweredMI); +} + +/// EmitConstantPool - Print to the current output stream assembly +/// representations of the constants in the constant pool MCP. This is +/// used to print out constants which have been "spilled to memory" by +/// the code generator. +void XtensaAsmPrinter::EmitConstantPool() { + const Function &F = MF->getFunction(); + const MachineConstantPool *MCP = MF->getConstantPool(); + const std::vector &CP = MCP->getConstants(); + if (CP.empty()) + return; + + for (unsigned i = 0, e = CP.size(); i != e; ++i) { + const MachineConstantPoolEntry &CPE = CP[i]; + + if (i == 0) { + if (OutStreamer->hasRawTextSupport()) { + OutStreamer->SwitchSection( + getObjFileLowering().SectionForGlobal(&F, TM)); + OutStreamer->EmitRawText("\t.literal_position\n"); + } else { + MCSectionELF *CS = + (MCSectionELF *)getObjFileLowering().SectionForGlobal(&F, TM); + std::string CSectionName = CS->getSectionName(); + std::size_t Pos = CSectionName.find(".text"); + std::string SectionName; + if (Pos != std::string::npos) { + if (Pos > 0) + SectionName = CSectionName.substr(0, Pos + 5); + else + SectionName = ""; + SectionName += ".literal"; + SectionName += CSectionName.substr(Pos + 5); + } else { + SectionName = CSectionName; + SectionName += ".literal"; + } + + MCSectionELF *S = + OutContext.getELFSection(SectionName, ELF::SHT_PROGBITS, + ELF::SHF_EXECINSTR | ELF::SHF_ALLOC); + S->setAlignment(Align(4)); + OutStreamer->SwitchSection(S); + } + } + + if (CPE.isMachineConstantPoolEntry()) { + XtensaConstantPoolValue *ACPV = + static_cast(CPE.Val.MachineCPVal); + ACPV->setLabelId(i); + EmitMachineConstantPoolValue(CPE.Val.MachineCPVal); + } else { + MCSymbol *LblSym = GetCPISymbol(i); + // TODO find a better way to check whether we emit data to .s file + if (OutStreamer->hasRawTextSupport()) { + std::string str("\t.literal "); + str += LblSym->getName(); + str += ", "; + const Constant *C = CPE.Val.ConstVal; + + Type *Ty = C->getType(); + if (const auto *CFP = dyn_cast(C)) { + str += CFP->getValueAPF().bitcastToAPInt().toString(10, true); + } else if (const auto *CI = dyn_cast(C)) { + str += CI->getValue().toString(10, true); + } else if (isa(Ty)) { + const MCExpr *ME = lowerConstant(C); + 
const MCSymbolRefExpr &SRE = cast(*ME); + const MCSymbol &Sym = SRE.getSymbol(); + str += Sym.getName(); + } else { + unsigned NumElements; + if (isa(Ty)) + NumElements = Ty->getVectorNumElements(); + else + NumElements = Ty->getArrayNumElements(); + + for (unsigned I = 0; I < NumElements; I++) { + const Constant *CAE = C->getAggregateElement(I); + if (I > 0) + str += ", "; + if (const auto *CFP = dyn_cast(CAE)) { + str += CFP->getValueAPF().bitcastToAPInt().toString(10, true); + } else if (const auto *CI = dyn_cast(CAE)) { + str += CI->getValue().toString(10, true); + } + } + } + + OutStreamer->EmitRawText(str); + } else { + OutStreamer->EmitLabel(LblSym); + EmitGlobalConstant(getDataLayout(), CPE.Val.ConstVal); + } + } + } +} + +void XtensaAsmPrinter::EmitMachineConstantPoolValue( + MachineConstantPoolValue *MCPV) { + XtensaConstantPoolValue *ACPV = static_cast(MCPV); + + MCSymbol *MCSym; + if (ACPV->isBlockAddress()) { + const BlockAddress *BA = + cast(ACPV)->getBlockAddress(); + MCSym = GetBlockAddressSymbol(BA); + } else if (ACPV->isGlobalValue()) { + const GlobalValue *GV = cast(ACPV)->getGV(); + // TODO some modifiers + MCSym = getSymbol(GV); + } else if (ACPV->isMachineBasicBlock()) { + const MachineBasicBlock *MBB = cast(ACPV)->getMBB(); + MCSym = MBB->getSymbol(); + } else if (ACPV->isJumpTable()) { + unsigned idx = cast(ACPV)->getIndex(); + MCSym = this->GetJTISymbol(idx, false); + } else { + assert(ACPV->isExtSymbol() && "unrecognized constant pool value"); + XtensaConstantPoolSymbol *XtensaSym = cast(ACPV); + const char *Sym = XtensaSym->getSymbol(); + // TODO it's a trick to distinguish static references and generated rodata + // references Some clear method required + { + std::string SymName(Sym); + if (XtensaSym->isPrivateLinkage()) + SymName = ".L" + SymName; + MCSym = GetExternalSymbolSymbol(StringRef(SymName)); + } + } + + MCSymbol *LblSym = GetCPISymbol(ACPV->getLabelId()); + // TODO find a better way to check whether we emit data to .s file + if (OutStreamer->hasRawTextSupport()) { + std::string SymName("\t.literal "); + SymName += LblSym->getName(); + SymName += ", "; + SymName += MCSym->getName(); + + StringRef Modifier = ACPV->getModifierText(); + SymName += Modifier; + + OutStreamer->EmitRawText(SymName); + } else { + MCSymbolRefExpr::VariantKind VK = + getModifierVariantKind(ACPV->getModifier()); + + if (ACPV->getModifier() != XtensaCP::no_modifier) { + std::string SymName(MCSym->getName()); + MCSym = GetExternalSymbolSymbol(StringRef(SymName)); + } + + const MCExpr *Expr = MCSymbolRefExpr::create(MCSym, VK, OutContext); + uint64_t Size = getDataLayout().getTypeAllocSize(ACPV->getType()); + OutStreamer->EmitLabel(LblSym); + OutStreamer->EmitValue(Expr, Size); + } +} + +void XtensaAsmPrinter::printOperand(const MachineInstr *MI, int OpNo, + raw_ostream &O) { + const MachineOperand &MO = MI->getOperand(OpNo); + // TODO look at target flags MO.getTargetFlags() to see if we should wrap this + // operand + switch (MO.getType()) { + case MachineOperand::MO_Register: + case MachineOperand::MO_Immediate: { + XtensaMCInstLower Lower(MF->getContext(), *this); + MCOperand MC(Lower.lowerOperand(MI->getOperand(OpNo))); + XtensaInstPrinter::printOperand(MC, O); + break; + } + case MachineOperand::MO_GlobalAddress: + O << *getSymbol(MO.getGlobal()); + break; + default: + llvm_unreachable(""); + } + + if (MO.getTargetFlags()) { + O << ")"; + } +} + +bool XtensaAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, + const char *ExtraCode, raw_ostream &O) { + if 
(ExtraCode && *ExtraCode == 'n') { + if (!MI->getOperand(OpNo).isImm()) + return true; + O << -int64_t(MI->getOperand(OpNo).getImm()); + } else { + printOperand(MI, OpNo, O); + } + return false; +} + +bool XtensaAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, + unsigned OpNo, + const char *ExtraCode, + raw_ostream &OS) { + XtensaInstPrinter::printAddress(MI->getOperand(OpNo).getReg(), + MI->getOperand(OpNo + 1).getImm(), OS); + return false; +} + +void XtensaAsmPrinter::printMemOperand(const MachineInstr *MI, int opNum, + raw_ostream &OS) { + OS << '%' + << XtensaInstPrinter::getRegisterName(MI->getOperand(opNum).getReg()); + OS << "("; + OS << MI->getOperand(opNum + 1).getImm(); + OS << ")"; +} + +// Force static initialization. +extern "C" void LLVMInitializeXtensaAsmPrinter() { + RegisterAsmPrinter A(TheXtensaTarget); +} diff --git a/llvm/lib/Target/Xtensa/XtensaAsmPrinter.h b/llvm/lib/Target/Xtensa/XtensaAsmPrinter.h new file mode 100644 index 0000000000000..f9c5fd57bd084 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaAsmPrinter.h @@ -0,0 +1,48 @@ +//===- XtensaAsmPrinter.h - Xtensa LLVM Assembly Printer -------*- C++-*--===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===---------------------------------------------------------------------===// +// +// Xtensa Assembly printer class. +// +//===---------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_XTENSA_XTENSAASMPRINTER_H +#define LLVM_LIB_TARGET_XTENSA_XTENSAASMPRINTER_H + +#include "XtensaTargetMachine.h" +#include "llvm/CodeGen/AsmPrinter.h" +#include "llvm/Support/Compiler.h" + +namespace llvm { +class MCStreamer; +class MachineBasicBlock; +class MachineInstr; +class Module; +class raw_ostream; + +class LLVM_LIBRARY_VISIBILITY XtensaAsmPrinter : public AsmPrinter { +private: +public: + XtensaAsmPrinter(TargetMachine &TM, std::unique_ptr Streamer) + : AsmPrinter(TM, std::move(Streamer)) {} + + // Override AsmPrinter. + StringRef getPassName() const override { return "Xtensa Assembly Printer"; } + void EmitInstruction(const MachineInstr *MI) override; + void EmitConstantPool() override; + void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) override; + void printOperand(const MachineInstr *MI, int opNum, raw_ostream &O); + bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, + const char *ExtraCode, raw_ostream &O) override; + bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo, + const char *ExtraCode, raw_ostream &OS) override; + void printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &OS); +}; +} // end namespace llvm + +#endif /* LLVM_LIB_TARGET_XTENSA_XTENSAASMPRINTER_H */ diff --git a/llvm/lib/Target/Xtensa/XtensaCallingConv.td b/llvm/lib/Target/Xtensa/XtensaCallingConv.td new file mode 100644 index 0000000000000..b4d6dde14b506 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaCallingConv.td @@ -0,0 +1,45 @@ +//===- XtensaCallingConv.td - Calling Conventions for Xtensa ----*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===---------------------------------------------------------------------------===// +// This describes the calling conventions for the Xtensa ABI. 
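+// RetCC_Xtensa returns values in a2-a5, while RetCCW_Xtensa (presumably for
+// the windowed call variants, hence the W) returns them in a10-a13;
+// CSR_Xtensa lists the callee-saved registers a0 and a12-a15.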
+//===---------------------------------------------------------------------------===// + +/// CCIfAlign - Match of the original alignment of the arg +class CCIfAlign: + CCIf; + +//===----------------------------------------------------------------------===// +// Xtensa return value calling convention +//===----------------------------------------------------------------------===// +def RetCC_Xtensa: CallingConv<[ + CCIfType<[i1, i8, i16], CCPromoteToType>, + CCIfType<[f32], CCBitConvertToType>, + + //First two return values go in a2, a3, a4, a5 + CCIfType<[i32], CCAssignToReg<[A2, A3, A4, A5]>>, + CCIfType<[f32], CCAssignToReg<[A2, A3, A4, A5]>>, + CCIfType<[i64], CCAssignToRegWithShadow<[A2, A4], [A3, A5]>> +]>; + +//===----------------------------------------------------------------------===// +// Callee-saved register lists. +//===----------------------------------------------------------------------===// + +def CSR_Xtensa: CalleeSavedRegs<(add A0, A12, A13, A14, A15)>; + +//===----------------------------------------------------------------------===// + +def RetCCW_Xtensa: CallingConv<[ + CCIfType<[i1, i8, i16], CCPromoteToType>, + CCIfType<[f32], CCBitConvertToType>, + + //First two return values go in a10, a11, a12, a13 + CCIfType<[i32], CCAssignToReg<[A10, A11, A12, A13]>>, + CCIfType<[f32], CCAssignToReg<[A10, A11, A12, A13]>>, + CCIfType<[i64], CCAssignToRegWithShadow<[A10, A12], [A11, A13]>> +]>; diff --git a/llvm/lib/Target/Xtensa/XtensaConstantPoolValue.cpp b/llvm/lib/Target/Xtensa/XtensaConstantPoolValue.cpp new file mode 100644 index 0000000000000..62fabab5a5fc0 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaConstantPoolValue.cpp @@ -0,0 +1,233 @@ +//===- XtensaConstantPoolValue.cpp - Xtensa constantpool value ------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the Xtensa specific constantpool value class. 
+// +//===----------------------------------------------------------------------===// + +#include "XtensaConstantPoolValue.h" +#include "llvm/ADT/FoldingSet.h" +#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/IR/Constant.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/GlobalValue.h" +#include "llvm/IR/Type.h" +#include "llvm/Support/raw_ostream.h" +#include +using namespace llvm; + +XtensaConstantPoolValue::XtensaConstantPoolValue( + Type *Ty, unsigned id, XtensaCP::XtensaCPKind kind, bool addCurrentAddress, + XtensaCP::XtensaCPModifier modifier) + : MachineConstantPoolValue(Ty), LabelId(id), Kind(kind), Modifier(modifier), + AddCurrentAddress(addCurrentAddress) {} + +XtensaConstantPoolValue::XtensaConstantPoolValue( + LLVMContext &C, unsigned id, XtensaCP::XtensaCPKind kind, + bool addCurrentAddress, XtensaCP::XtensaCPModifier modifier) + : MachineConstantPoolValue((Type *)Type::getInt32Ty(C)), LabelId(id), + Kind(kind), Modifier(modifier), AddCurrentAddress(addCurrentAddress) {} + +XtensaConstantPoolValue::~XtensaConstantPoolValue() {} + +StringRef XtensaConstantPoolValue::getModifierText() const { + switch (Modifier) { + case XtensaCP::no_modifier: + return ""; + case XtensaCP::TPOFF: + return "@TPOFF"; + } + llvm_unreachable("Unknown modifier!"); +} + +int XtensaConstantPoolValue::getExistingMachineCPValue(MachineConstantPool *CP, + unsigned Alignment) { + llvm_unreachable("Shouldn't be calling this directly!"); +} + +void XtensaConstantPoolValue::addSelectionDAGCSEId(FoldingSetNodeID &ID) { + ID.AddInteger(LabelId); +} + +bool XtensaConstantPoolValue::hasSameValue(XtensaConstantPoolValue *ACPV) { + if (ACPV->Kind == Kind) { + if (ACPV->LabelId == LabelId) + return true; + // Two PC relative constpool entries containing the same GV address or + // external symbols. FIXME: What about blockaddress? 
+ if (Kind == XtensaCP::CPValue || Kind == XtensaCP::CPExtSymbol) + return true; + } + return false; +} + +void XtensaConstantPoolValue::dump() const { errs() << " " << *this; } + +void XtensaConstantPoolValue::print(raw_ostream &O) const {} + +//===----------------------------------------------------------------------===// +// XtensaConstantPoolConstant +//===----------------------------------------------------------------------===// + +XtensaConstantPoolConstant::XtensaConstantPoolConstant( + Type *Ty, const Constant *C, unsigned ID, XtensaCP::XtensaCPKind Kind, + bool AddCurrentAddress) + : XtensaConstantPoolValue((Type *)C->getType(), ID, Kind, + AddCurrentAddress), + CVal(C) {} + +XtensaConstantPoolConstant::XtensaConstantPoolConstant( + const Constant *C, unsigned ID, XtensaCP::XtensaCPKind Kind, + bool AddCurrentAddress) + : XtensaConstantPoolValue((Type *)C->getType(), ID, Kind, + AddCurrentAddress), + CVal(C) {} + +XtensaConstantPoolConstant * +XtensaConstantPoolConstant::Create(const Constant *C, unsigned ID, + XtensaCP::XtensaCPKind Kind) { + return new XtensaConstantPoolConstant(C, ID, Kind, false); +} + +XtensaConstantPoolConstant * +XtensaConstantPoolConstant::Create(const Constant *C, unsigned ID, + XtensaCP::XtensaCPKind Kind, + bool AddCurrentAddress) { + return new XtensaConstantPoolConstant(C, ID, Kind, AddCurrentAddress); +} + +const GlobalValue *XtensaConstantPoolConstant::getGV() const { + return dyn_cast_or_null(CVal); +} + +const BlockAddress *XtensaConstantPoolConstant::getBlockAddress() const { + return dyn_cast_or_null(CVal); +} + +int XtensaConstantPoolConstant::getExistingMachineCPValue( + MachineConstantPool *CP, unsigned Alignment) { + return getExistingMachineCPValueImpl(CP, + Alignment); +} + +bool XtensaConstantPoolConstant::hasSameValue(XtensaConstantPoolValue *ACPV) { + const XtensaConstantPoolConstant *ACPC = + dyn_cast(ACPV); + return ACPC && ACPC->CVal == CVal && + XtensaConstantPoolValue::hasSameValue(ACPV); +} + +void XtensaConstantPoolConstant::addSelectionDAGCSEId(FoldingSetNodeID &ID) { + ID.AddPointer(CVal); + XtensaConstantPoolValue::addSelectionDAGCSEId(ID); +} + +void XtensaConstantPoolConstant::print(raw_ostream &O) const { + O << CVal->getName(); + XtensaConstantPoolValue::print(O); +} + +XtensaConstantPoolSymbol::XtensaConstantPoolSymbol( + LLVMContext &C, const char *s, unsigned id, bool AddCurrentAddress, + bool PrivLinkage, XtensaCP::XtensaCPModifier Modifier) + : XtensaConstantPoolValue(C, id, XtensaCP::CPExtSymbol, AddCurrentAddress, + Modifier), + S(s), PrivateLinkage(PrivLinkage) {} + +XtensaConstantPoolSymbol * +XtensaConstantPoolSymbol::Create(LLVMContext &C, const char *s, unsigned ID, + bool PrivLinkage, + XtensaCP::XtensaCPModifier Modifier) + +{ + return new XtensaConstantPoolSymbol(C, s, ID, false, PrivLinkage, Modifier); +} + +int XtensaConstantPoolSymbol::getExistingMachineCPValue(MachineConstantPool *CP, + unsigned Alignment) { + return getExistingMachineCPValueImpl(CP, Alignment); +} + +bool XtensaConstantPoolSymbol::hasSameValue(XtensaConstantPoolValue *ACPV) { + const XtensaConstantPoolSymbol *ACPS = + dyn_cast(ACPV); + return ACPS && ACPS->S == S && XtensaConstantPoolValue::hasSameValue(ACPV); +} + +void XtensaConstantPoolSymbol::addSelectionDAGCSEId(FoldingSetNodeID &ID) { + ID.AddString(S); + XtensaConstantPoolValue::addSelectionDAGCSEId(ID); +} + +void XtensaConstantPoolSymbol::print(raw_ostream &O) const { + O << S; + XtensaConstantPoolValue::print(O); +} + +XtensaConstantPoolMBB::XtensaConstantPoolMBB(LLVMContext 
&C, + const MachineBasicBlock *mbb, + unsigned id) + : XtensaConstantPoolValue(C, 0, XtensaCP::CPMachineBasicBlock, false), + MBB(mbb) {} + +XtensaConstantPoolMBB * +XtensaConstantPoolMBB::Create(LLVMContext &C, const MachineBasicBlock *mbb, + unsigned idx) { + return new XtensaConstantPoolMBB(C, mbb, idx); +} + +int XtensaConstantPoolMBB::getExistingMachineCPValue(MachineConstantPool *CP, + unsigned Alignment) { + return getExistingMachineCPValueImpl(CP, Alignment); +} + +bool XtensaConstantPoolMBB::hasSameValue(XtensaConstantPoolValue *ACPV) { + const XtensaConstantPoolMBB *ACPMBB = dyn_cast(ACPV); + return ACPMBB && ACPMBB->MBB == MBB && + XtensaConstantPoolValue::hasSameValue(ACPV); +} + +void XtensaConstantPoolMBB::addSelectionDAGCSEId(FoldingSetNodeID &ID) { + ID.AddPointer(MBB); + XtensaConstantPoolValue::addSelectionDAGCSEId(ID); +} + +void XtensaConstantPoolMBB::print(raw_ostream &O) const { + O << "BB#" << MBB->getNumber(); + XtensaConstantPoolValue::print(O); +} + +XtensaConstantPoolJumpTable::XtensaConstantPoolJumpTable(LLVMContext &C, + unsigned idx) + : XtensaConstantPoolValue(C, 0, XtensaCP::CPJumpTable, false), IDX(idx) {} + +XtensaConstantPoolJumpTable *XtensaConstantPoolJumpTable::Create(LLVMContext &C, + unsigned idx) { + return new XtensaConstantPoolJumpTable(C, idx); +} + +int XtensaConstantPoolJumpTable::getExistingMachineCPValue( + MachineConstantPool *CP, unsigned Alignment) { + return getExistingMachineCPValueImpl(CP, + Alignment); +} + +bool XtensaConstantPoolJumpTable::hasSameValue(XtensaConstantPoolValue *ACPV) { + const XtensaConstantPoolJumpTable *ACPJT = + dyn_cast(ACPV); + return ACPJT && ACPJT->IDX == IDX && + XtensaConstantPoolValue::hasSameValue(ACPV); +} + +void XtensaConstantPoolJumpTable::addSelectionDAGCSEId(FoldingSetNodeID &ID) {} + +void XtensaConstantPoolJumpTable::print(raw_ostream &O) const { + O << "JT" << IDX; + XtensaConstantPoolValue::print(O); +} diff --git a/llvm/lib/Target/Xtensa/XtensaConstantPoolValue.h b/llvm/lib/Target/Xtensa/XtensaConstantPoolValue.h new file mode 100644 index 0000000000000..858981942ad09 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaConstantPoolValue.h @@ -0,0 +1,280 @@ +//===- XtensaConstantPoolValue.h - Xtensa constantpool value ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the Xtensa specific constantpool value class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_XTENSA_XTENSACONSTANTPOOLVALUE_H +#define LLVM_LIB_TARGET_XTENSA_XTENSACONSTANTPOOLVALUE_H + +#include "llvm/CodeGen/MachineConstantPool.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/ErrorHandling.h" +#include + +namespace llvm { + +class BlockAddress; +class Constant; +class GlobalValue; +class LLVMContext; +class MachineBasicBlock; + +namespace XtensaCP { +enum XtensaCPKind { + CPValue, + CPExtSymbol, + CPBlockAddress, + CPMachineBasicBlock, + CPJumpTable +}; + +enum XtensaCPModifier { + no_modifier, // None + TPOFF // Thread Pointer Offset +}; +} // namespace XtensaCP + +/// XtensaConstantPoolValue - Xtensa specific constantpool value. This is used +/// to represent PC-relative displacement between the address of the load +/// instruction and the constant being loaded, i.e. (&GV-(LPIC+8)). 
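+/// Concrete kinds are modelled by the subclasses below:
+/// XtensaConstantPoolConstant, XtensaConstantPoolSymbol,
+/// XtensaConstantPoolMBB and XtensaConstantPoolJumpTable.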
+class XtensaConstantPoolValue : public MachineConstantPoolValue { + unsigned LabelId; // Label id of the load. + XtensaCP::XtensaCPKind Kind; // Kind of constant. + XtensaCP::XtensaCPModifier Modifier; // GV modifier + bool AddCurrentAddress; + +protected: + XtensaConstantPoolValue( + Type *Ty, unsigned id, XtensaCP::XtensaCPKind Kind, + bool AddCurrentAddress, + XtensaCP::XtensaCPModifier Modifier = XtensaCP::no_modifier); + + XtensaConstantPoolValue( + LLVMContext &C, unsigned id, XtensaCP::XtensaCPKind Kind, + bool AddCurrentAddress, + XtensaCP::XtensaCPModifier Modifier = XtensaCP::no_modifier); + + template + int getExistingMachineCPValueImpl(MachineConstantPool *CP, + unsigned Alignment) { + unsigned AlignMask = Alignment - 1; + const std::vector &Constants = CP->getConstants(); + for (unsigned i = 0, e = Constants.size(); i != e; ++i) { + if (Constants[i].isMachineConstantPoolEntry() && + (Constants[i].getAlignment() & AlignMask) == 0) { + XtensaConstantPoolValue *CPV = + (XtensaConstantPoolValue *)Constants[i].Val.MachineCPVal; + if (Derived *APC = dyn_cast(CPV)) + if (cast(this)->equals(APC)) + return i; + } + } + + return -1; + } + +public: + ~XtensaConstantPoolValue() override; + + XtensaCP::XtensaCPModifier getModifier() const { return Modifier; } + bool hasModifier() const { return Modifier != XtensaCP::no_modifier; } + StringRef getModifierText() const; + + bool mustAddCurrentAddress() const { return AddCurrentAddress; } + + unsigned getLabelId() const { return LabelId; } + void setLabelId(unsigned id) { LabelId = id; } + + bool isGlobalValue() const { return Kind == XtensaCP::CPValue; } + bool isExtSymbol() const { return Kind == XtensaCP::CPExtSymbol; } + bool isBlockAddress() const { return Kind == XtensaCP::CPBlockAddress; } + bool isMachineBasicBlock() const { + return Kind == XtensaCP::CPMachineBasicBlock; + } + bool isJumpTable() const { return Kind == XtensaCP::CPJumpTable; } + + int getExistingMachineCPValue(MachineConstantPool *CP, + unsigned Alignment) override; + + void addSelectionDAGCSEId(FoldingSetNodeID &ID) override; + + /// hasSameValue - Return true if this Xtensa constpool value can share the + /// same constantpool entry as another Xtensa constpool value. + virtual bool hasSameValue(XtensaConstantPoolValue *ACPV); + + bool equals(const XtensaConstantPoolValue *A) const { + return this->LabelId == A->LabelId && this->Modifier == A->Modifier; + } + + void print(raw_ostream &O) const override; + void print(raw_ostream *O) const { + if (O) + print(*O); + } + void dump() const; +}; + +inline raw_ostream &operator<<(raw_ostream &O, + const XtensaConstantPoolValue &V) { + V.print(O); + return O; +} + +/// XtensaConstantPoolConstant - Xtensa-specific constant pool values for +/// Constants, Functions, and BlockAddresses. +class XtensaConstantPoolConstant : public XtensaConstantPoolValue { + const Constant *CVal; // Constant being loaded. 
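A note on the deduplication contract above: getExistingMachineCPValueImpl walks the existing machine constant-pool entries and reuses one whenever the derived class's equals() says the values match. The sketch below is a deliberately simplified, standalone model of that lookup; PoolEntry and findExistingEntry are invented for the example, and the real code additionally checks isMachineConstantPoolEntry() and the entry's alignment. It only illustrates why equals()/hasSameValue() are enough to keep a single literal per symbol.

```cpp
#include <string>
#include <vector>

// Simplified stand-in for a constant-pool entry as the lookup above sees it:
// only the fields that participate in equality are kept.
struct PoolEntry {
  std::string Symbol; // plays the role of XtensaConstantPoolSymbol::S
  unsigned LabelId;   // plays the role of XtensaConstantPoolValue::LabelId
  int Modifier;       // plays the role of XtensaCP::XtensaCPModifier
};

// Mirrors the shape of getExistingMachineCPValueImpl: return the index of an
// entry that compares equal, or -1 so the caller knows to append a new one.
static int findExistingEntry(const std::vector<PoolEntry> &Constants,
                             const PoolEntry &Candidate) {
  for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
    const PoolEntry &E = Constants[i];
    if (E.Symbol == Candidate.Symbol && E.LabelId == Candidate.LabelId &&
        E.Modifier == Candidate.Modifier)
      return static_cast<int>(i);
  }
  return -1;
}

int main() {
  std::vector<PoolEntry> Pool;
  PoolEntry Memcpy{"memcpy", 0, 0};
  if (findExistingEntry(Pool, Memcpy) < 0)
    Pool.push_back(Memcpy); // first use creates the literal
  // A second reference to the same symbol reuses index 0 instead of growing
  // the pool, which is exactly what hasSameValue()/equals() buy us.
  return findExistingEntry(Pool, Memcpy) == 0 ? 0 : 1;
}
```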
+ + XtensaConstantPoolConstant(const Constant *C, unsigned ID, + XtensaCP::XtensaCPKind Kind, + bool AddCurrentAddress); + XtensaConstantPoolConstant(Type *Ty, const Constant *C, unsigned ID, + XtensaCP::XtensaCPKind Kind, + bool AddCurrentAddress); + +public: + static XtensaConstantPoolConstant *Create(const Constant *C, unsigned ID, + XtensaCP::XtensaCPKind Kind); + static XtensaConstantPoolConstant *Create(const Constant *C, unsigned ID, + XtensaCP::XtensaCPKind Kind, + bool AddCurrentAddress); + + const GlobalValue *getGV() const; + const BlockAddress *getBlockAddress() const; + + int getExistingMachineCPValue(MachineConstantPool *CP, + unsigned Alignment) override; + + /// hasSameValue - Return true if this Xtensa constpool value can share the + /// same constantpool entry as another Xtensa constpool value. + bool hasSameValue(XtensaConstantPoolValue *ACPV) override; + + void addSelectionDAGCSEId(FoldingSetNodeID &ID) override; + + void print(raw_ostream &O) const override; + static bool classof(const XtensaConstantPoolValue *APV) { + return APV->isGlobalValue() || APV->isBlockAddress(); + } + + bool equals(const XtensaConstantPoolConstant *A) const { + return CVal == A->CVal && XtensaConstantPoolValue::equals(A); + } +}; + +/// XtensaConstantPoolSymbol - Xtensa-specific constantpool values for external +/// symbols. +class XtensaConstantPoolSymbol : public XtensaConstantPoolValue { + const std::string S; // ExtSymbol being loaded. + bool PrivateLinkage; + + XtensaConstantPoolSymbol( + LLVMContext &C, const char *s, unsigned id, bool AddCurrentAddress, + bool PrivLinkage, + XtensaCP::XtensaCPModifier Modifier = XtensaCP::no_modifier); + +public: + static XtensaConstantPoolSymbol * + Create(LLVMContext &C, const char *s, unsigned ID, bool PrivLinkage, + XtensaCP::XtensaCPModifier Modifier = XtensaCP::no_modifier); + + const char *getSymbol() const { return S.c_str(); } + + int getExistingMachineCPValue(MachineConstantPool *CP, + unsigned Alignment) override; + + void addSelectionDAGCSEId(FoldingSetNodeID &ID) override; + + /// hasSameValue - Return true if this Xtensa constpool value can share the + /// same constantpool entry as another Xtensa constpool value. + bool hasSameValue(XtensaConstantPoolValue *ACPV) override; + + bool isPrivateLinkage() { return PrivateLinkage; } + + void print(raw_ostream &O) const override; + + static bool classof(const XtensaConstantPoolValue *ACPV) { + return ACPV->isExtSymbol(); + } + + bool equals(const XtensaConstantPoolSymbol *A) const { + return S == A->S && XtensaConstantPoolValue::equals(A); + } +}; + +/// XtensaConstantPoolMBB - Xtensa-specific constantpool value of a machine +/// basic block. +class XtensaConstantPoolMBB : public XtensaConstantPoolValue { + const MachineBasicBlock *MBB; // Machine basic block. + + XtensaConstantPoolMBB(LLVMContext &C, const MachineBasicBlock *mbb, + unsigned id); + +public: + static XtensaConstantPoolMBB * + Create(LLVMContext &C, const MachineBasicBlock *mbb, unsigned ID); + + const MachineBasicBlock *getMBB() const { return MBB; } + + int getExistingMachineCPValue(MachineConstantPool *CP, + unsigned Alignment) override; + + void addSelectionDAGCSEId(FoldingSetNodeID &ID) override; + + /// hasSameValue - Return true if this Xtensa constpool value can share the + /// same constantpool entry as another Xtensa constpool value. 
+ bool hasSameValue(XtensaConstantPoolValue *ACPV) override; + + void print(raw_ostream &O) const override; + + static bool classof(const XtensaConstantPoolValue *ACPV) { + return ACPV->isMachineBasicBlock(); + } + + bool equals(const XtensaConstantPoolMBB *A) const { + return MBB == A->MBB && XtensaConstantPoolValue::equals(A); + } +}; + +/// XtensaConstantPoolJumpTable - Xtensa-specific constantpool values for Jump +/// Table symbols. +class XtensaConstantPoolJumpTable : public XtensaConstantPoolValue { + unsigned IDX; // Jump Table Index. + + XtensaConstantPoolJumpTable(LLVMContext &C, unsigned idx); + +public: + static XtensaConstantPoolJumpTable *Create(LLVMContext &C, unsigned idx); + + unsigned getIndex() const { return IDX; } + + int getExistingMachineCPValue(MachineConstantPool *CP, + unsigned Alignment) override; + + void addSelectionDAGCSEId(FoldingSetNodeID &ID) override; + + /// hasSameValue - Return true if this Xtensa constpool value can share the + /// same constantpool entry as another Xtensa constpool value. + bool hasSameValue(XtensaConstantPoolValue *ACPV) override; + + void print(raw_ostream &O) const override; + + static bool classof(const XtensaConstantPoolValue *ACPV) { + return ACPV->isJumpTable(); + } + + bool equals(const XtensaConstantPoolJumpTable *A) const { + return IDX == A->IDX && XtensaConstantPoolValue::equals(A); + } +}; + +} // namespace llvm + +#endif /* LLVM_LIB_TARGET_XTENSA_XTENSACONSTANTPOOLVALUE_H */ diff --git a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp new file mode 100644 index 0000000000000..8e828b93f3811 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp @@ -0,0 +1,388 @@ +//===- XtensaFrameLowering.cpp - Xtensa Frame Information ----------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===---------------------------------------------------------------------===// +// +// This file contains the Xtensa implementation of TargetFrameLowering class. 
+// +//===---------------------------------------------------------------------===// + +#include "XtensaFrameLowering.h" +#include "XtensaInstrInfo.h" +#include "XtensaSubtarget.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineModuleInfo.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/RegisterScavenging.h" +#include "llvm/IR/Function.h" + +using namespace llvm; + +XtensaFrameLowering::XtensaFrameLowering() + : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 4, 0, 4) {} + +/* Xtensa stack frames look like: + + +-------------------------------+ + | incoming stack arguments | + +-------------------------------+ + A | caller-allocated save area | + | for register arguments | + +-------------------------------+ <-- incoming stack pointer + B | CALL0 ABI: | + | callee-allocated save area | + | for arguments that are | + | split between registers and | + | the stack (Register-Spill | + | Area) | + | | + | Win ABI: | + | Register-Spill Overflow | + | 8 words for CALL8/CALLX8 | + +-------------------------------+ <-- arg_pointer_rtx + C | callee-allocated save area | + | for register varargs | + +-------------------------------+ <-- hard_frame_pointer_rtx; + | | stack_pointer_rtx + gp_sp_offset + | GPR save area | + UNITS_PER_WORD + +-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset + | | + UNITS_PER_HWVALUE + | FPR save area | + +-------------------------------+ <-- frame_pointer_rtx (virtual) + | local variables | + P +-------------------------------+ + | outgoing stack arguments | + +-------------------------------+ + | caller-allocated save area | + | for register arguments | + +-------------------------------+ <-- stack_pointer_rtx + + At least two of A, B and C will be empty. + + Dynamic stack allocations such as alloca insert data at point P. + They decrease stack_pointer_rtx but leave frame_pointer_rtx and + hard_frame_pointer_rtx unchanged. */ + +// hasFP - Return true if the specified function should have a dedicated frame +// pointer register. This is true if the function has variable sized allocas or +// if frame pointer elimination is disabled. +bool XtensaFrameLowering::hasFP(const MachineFunction &MF) const { + const MachineFrameInfo &MFI = MF.getFrameInfo(); + return MF.getTarget().Options.DisableFramePointerElim(MF) || + MFI.hasVarSizedObjects(); +} + +/* minimum frame = reg save area (4 words) plus static chain (1 word) + and the total number of words must be a multiple of 128 bits. */ +/* Width of a word, in units (bytes). */ +#define UNITS_PER_WORD 4 +#define MIN_FRAME_SIZE (8 * UNITS_PER_WORD) + +void XtensaFrameLowering::emitPrologue(MachineFunction &MF, + MachineBasicBlock &MBB) const { + assert(&MBB == &MF.front() && "Shrink-wrapping not yet implemented"); + MachineFrameInfo &MFI = MF.getFrameInfo(); + const XtensaRegisterInfo *RegInfo = static_cast( + MF.getSubtarget().getRegisterInfo()); + const XtensaInstrInfo &TII = + *static_cast(MF.getSubtarget().getInstrInfo()); + MachineBasicBlock::iterator MBBI = MBB.begin(); + const XtensaSubtarget &STI = MF.getSubtarget(); + DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc(); + unsigned SP = Xtensa::SP; + unsigned FP = RegInfo->getFrameRegister(MF); + MachineModuleInfo &MMI = MF.getMMI(); + const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo(); + + // First, compute final stack size. 
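Elaborating on the final stack-size computation for the windowed ABI that follows: 32 bytes are added for the CALL8 register-spill overflow area shown in the frame diagram above, the total is rounded up to a multiple of 8, and only frames up to 32760 bytes are encoded directly in ENTRY (which would correspond to a 12-bit immediate scaled by 8); larger frames take the ENTRY plus loadImmediate/SUB/MOVSP path. A small standalone sketch of just that arithmetic, with helper names invented for the example:

```cpp
#include <cassert>
#include <cstdint>

// Frame-size arithmetic used by the windowed-ABI prologue: add the 32-byte
// (8-word) CALL8 register-spill overflow area, then round up to 8 * N.
static uint64_t windowedFrameSize(uint64_t StackSize) {
  StackSize += 32;
  StackSize += (8 - StackSize) & 0x7;
  return StackSize;
}

// The prologue only uses ENTRY's immediate for frames up to 32760 bytes;
// anything larger is built with ENTRY + loadImmediate + SUB + MOVSP.
static bool fitsInEntryImmediate(uint64_t FrameSize) {
  return FrameSize <= 32760;
}

int main() {
  assert(windowedFrameSize(0) == 32);
  assert(windowedFrameSize(20) == 56); // 20 + 32 = 52, rounded up to 56
  assert(fitsInEntryImmediate(windowedFrameSize(20)));
  assert(!fitsInEntryImmediate(40000));
  return 0;
}
```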
+ uint64_t StackSize = MFI.getStackSize(); + uint64_t PrevStackSize = StackSize; + + if (STI.isWinABI()) { + StackSize += 32; + // Round up StackSize to 8*N + StackSize += (8 - StackSize) & 0x7; + if (StackSize <= 32760) { + BuildMI(MBB, MBBI, dl, TII.get(Xtensa::ENTRY)) + .addReg(SP) + .addImm(StackSize); + } else { + /* Use a8 as a temporary since a0-a7 may be live. */ + unsigned TmpReg = Xtensa::A8; + + const XtensaInstrInfo &TII = *static_cast( + MBB.getParent()->getSubtarget().getInstrInfo()); + BuildMI(MBB, MBBI, dl, TII.get(Xtensa::ENTRY)) + .addReg(SP) + .addImm(MIN_FRAME_SIZE); + TII.loadImmediate(MBB, MBBI, &TmpReg, StackSize - MIN_FRAME_SIZE); + BuildMI(MBB, MBBI, dl, TII.get(Xtensa::SUB), TmpReg) + .addReg(SP) + .addReg(TmpReg); + BuildMI(MBB, MBBI, dl, TII.get(Xtensa::MOVSP), SP).addReg(TmpReg); + } + + // if framepointer enabled, set it to point to the stack pointer. + if (hasFP(MF)) { + // Insert instruction "move $fp, $sp" at this location. + if (STI.hasDensity()) { + BuildMI(MBB, MBBI, dl, TII.get(Xtensa::MOV_N), FP) + .addReg(SP) + .setMIFlag(MachineInstr::FrameSetup); + } else { + BuildMI(MBB, MBBI, dl, TII.get(Xtensa::OR), FP) + .addReg(SP) + .addReg(SP) + .setMIFlag(MachineInstr::FrameSetup); + } + + MCCFIInstruction Inst = MCCFIInstruction::createDefCfa( + nullptr, MRI->getDwarfRegNum(FP, true), -StackSize); + unsigned CFIIndex = MF.addFrameInst(Inst); + BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex); + } else { + // emit ".cfi_def_cfa_offset StackSize" + unsigned CFIIndex = MF.addFrameInst( + MCCFIInstruction::createDefCfaOffset(nullptr, -StackSize)); + BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex); + } + } else { + // No need to allocate space on the stack. + if (StackSize == 0 && !MFI.adjustsStack()) + return; + + // MachineLocation DstML, SrcML; + + // Adjust stack. + TII.adjustStackPtr(SP, -StackSize, MBB, MBBI); + + // emit ".cfi_def_cfa_offset StackSize" + unsigned CFIIndex = MF.addFrameInst( + MCCFIInstruction::createDefCfaOffset(nullptr, -StackSize)); + BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex); + + const std::vector &CSI = MFI.getCalleeSavedInfo(); + + if (CSI.size()) { + // Find the instruction past the last instruction that saves a + // callee-saved register to the stack. + for (unsigned i = 0; i < CSI.size(); ++i) + ++MBBI; + + // Iterate over list of callee-saved registers and emit .cfi_offset + // directives. + for (const auto &I : CSI) { + int64_t Offset = MFI.getObjectOffset(I.getFrameIdx()); + unsigned Reg = I.getReg(); + + // Reg is either in CPURegs or FGR32. + unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset( + nullptr, MRI->getDwarfRegNum(Reg, 1), Offset)); + BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex); + } + } + + // if framepointer enabled, set it to point to the stack pointer. + if (hasFP(MF)) { + // Insert instruction "move $fp, $sp" at this location. 
+ if (STI.hasDensity()) { + BuildMI(MBB, MBBI, dl, TII.get(Xtensa::MOV_N), FP) + .addReg(SP) + .setMIFlag(MachineInstr::FrameSetup); + } else { + BuildMI(MBB, MBBI, dl, TII.get(Xtensa::OR), FP) + .addReg(SP) + .addReg(SP) + .setMIFlag(MachineInstr::FrameSetup); + } + + // emit ".cfi_def_cfa_register $fp" + unsigned CFIIndex = + MF.addFrameInst(MCCFIInstruction::createDefCfaRegister( + nullptr, MRI->getDwarfRegNum(FP, true))); + BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex); + } + } + + if (StackSize != PrevStackSize) { + MFI.setStackSize(StackSize); + + for (int i = MFI.getObjectIndexBegin(); i < MFI.getObjectIndexEnd(); i++) { + if (!MFI.isDeadObjectIndex(i)) { + int64_t SPOffset = MFI.getObjectOffset(i); + // errs() << "SPOffset = " + SPOffset << "\n"; + if (SPOffset < 0) + MFI.setObjectOffset(i, SPOffset - StackSize + PrevStackSize); + } + } + } +} + +void XtensaFrameLowering::emitEpilogue(MachineFunction &MF, + MachineBasicBlock &MBB) const { + MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr(); + MachineFrameInfo &MFI = MF.getFrameInfo(); + const XtensaRegisterInfo *RegInfo = static_cast( + MF.getSubtarget().getRegisterInfo()); + const XtensaInstrInfo &TII = + *static_cast(MF.getSubtarget().getInstrInfo()); + const XtensaSubtarget &STI = MF.getSubtarget(); + DebugLoc dl = MBBI->getDebugLoc(); + unsigned SP = Xtensa::SP; + unsigned FP = RegInfo->getFrameRegister(MF); + + // if framepointer enabled, restore the stack pointer. + if (hasFP(MF)) { + // Find the first instruction that restores a callee-saved register. + MachineBasicBlock::iterator I = MBBI; + + for (unsigned i = 0; i < MFI.getCalleeSavedInfo().size(); ++i) + --I; + if (STI.isWinABI()) { + // Insert instruction "movsp $sp, $fp" at this location. + BuildMI(MBB, I, dl, TII.get(Xtensa::MOVSP), SP).addReg(FP); + } else { + BuildMI(MBB, I, dl, TII.get(Xtensa::OR), SP).addReg(FP).addReg(FP); + } + } + + if (STI.isWinABI()) + return; + + // Get the number of bytes from FrameInfo + uint64_t StackSize = MFI.getStackSize(); + + if (!StackSize) + return; + + // Adjust stack. + TII.adjustStackPtr(SP, StackSize, MBB, MBBI); +} + +bool XtensaFrameLowering::spillCalleeSavedRegisters( + MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, + const std::vector &CSI, + const TargetRegisterInfo *TRI) const { + MachineFunction *MF = MBB.getParent(); + const XtensaSubtarget &STI = MF->getSubtarget(); + + if (STI.isWinABI()) + return true; + + MachineBasicBlock &EntryBlock = *(MF->begin()); + const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo(); + + for (unsigned i = 0, e = CSI.size(); i != e; ++i) { + // Add the callee-saved register as live-in. Do not add if the register is + // RA and return address is taken, because it has already been added in + // method XtensaTargetLowering::LowerRETURNADDR. + // It's killed at the spill, unless the register is RA and return address + // is taken. + unsigned Reg = CSI[i].getReg(); + bool IsRAAndRetAddrIsTaken = + (Reg == Xtensa::A0) && MF->getFrameInfo().isReturnAddressTaken(); + if (!IsRAAndRetAddrIsTaken) + EntryBlock.addLiveIn(Reg); + + // Insert the spill to the stack frame. 
+ bool IsKill = !IsRAAndRetAddrIsTaken; + const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg); + TII.storeRegToStackSlot(EntryBlock, MI, Reg, IsKill, CSI[i].getFrameIdx(), + RC, TRI); + } + + return true; +} + +bool XtensaFrameLowering::restoreCalleeSavedRegisters( + MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, + std::vector &CSI, const TargetRegisterInfo *TRI) const { + MachineFunction *MF = MBB.getParent(); + const XtensaSubtarget &STI = MF->getSubtarget(); + if (STI.isWinABI()) + return true; + return TargetFrameLowering::restoreCalleeSavedRegisters(MBB, MI, CSI, TRI); +} + +// Eliminate ADJCALLSTACKDOWN, ADJCALLSTACKUP pseudo instructions +MachineBasicBlock::iterator XtensaFrameLowering::eliminateCallFramePseudoInstr( + MachineFunction &MF, MachineBasicBlock &MBB, + MachineBasicBlock::iterator I) const { + const XtensaInstrInfo &TII = + *static_cast(MF.getSubtarget().getInstrInfo()); + + if (!hasReservedCallFrame(MF)) { + int64_t Amount = I->getOperand(0).getImm(); + + if (I->getOpcode() == Xtensa::ADJCALLSTACKDOWN) + Amount = -Amount; + + unsigned SP = Xtensa::SP; + TII.adjustStackPtr(SP, Amount, MBB, I); + } + + return MBB.erase(I); +} + +void XtensaFrameLowering::determineCalleeSaves(MachineFunction &MF, + BitVector &SavedRegs, + RegScavenger *RS) const { + const XtensaSubtarget &STI = MF.getSubtarget(); + MachineFrameInfo &MFI = MF.getFrameInfo(); + const XtensaRegisterInfo *RegInfo = static_cast( + MF.getSubtarget().getRegisterInfo()); + unsigned FP = RegInfo->getFrameRegister(MF); + + if (STI.isWinABI()) { + // It's some trick, 8 regsiters are marked as spilled, + // but real spill is in ENTRY instruction in case of register bank overflow + SavedRegs.resize(RegInfo->getNumRegs()); + for (int i = Xtensa::A8; i <= Xtensa::A15; i++) + SavedRegs.set(i); + + return; + } + + TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS); + + // Mark $fp as used if function has dedicated frame pointer. + if (hasFP(MF)) + SavedRegs.set(FP); + + // Set scavenging frame index if necessary. 
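The check that follows creates an emergency spill slot for the register scavenger whenever the estimated frame does not fit a signed 12-bit offset, so frame-index elimination always has somewhere to spill a register while materializing a large offset. A standalone restatement of just that cutoff, with the helper name invented here:

```cpp
#include <cassert>
#include <cstdint>

// Restatement of the cutoff used below: isInt<12>(Offset) accepts exactly
// the signed 12-bit range [-2048, 2047].
static bool fitsInSigned12(int64_t Offset) {
  return Offset >= -2048 && Offset <= 2047;
}

int main() {
  assert(fitsInSigned12(2047));
  assert(!fitsInSigned12(2048)); // a frame this large gets a scavenging slot
  assert(fitsInSigned12(-2048));
  return 0;
}
```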
+ uint64_t MaxSPOffset = MFI.estimateStackSize(MF); + + if (isInt<12>(MaxSPOffset)) + return; + + const TargetRegisterClass &RC = Xtensa::ARRegClass; + const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); + unsigned Size = TRI->getSpillSize(RC); + unsigned Align = TRI->getSpillAlignment(RC); + int FI = MF.getFrameInfo().CreateStackObject(Size, Align, false); + RS->addScavengingFrameIndex(FI); +} + +void XtensaFrameLowering::processFunctionBeforeFrameFinalized( + MachineFunction &MF, RegScavenger *RS) const { + const XtensaSubtarget &STI = MF.getSubtarget(); + // XtensaFunctionInfo *XFI = MF.getInfo(); + // In WinABI mode add register scavenging slot + if (STI.isWinABI() && (MF.getFrameInfo().estimateStackSize(MF) > 512)) { + MachineFrameInfo &MFI = MF.getFrameInfo(); + const TargetRegisterClass &RC = Xtensa::ARRegClass; + const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); + unsigned Size = TRI.getSpillSize(RC); + unsigned Align = TRI.getSpillAlignment(RC); + RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Align, false)); + } +} diff --git a/llvm/lib/Target/Xtensa/XtensaFrameLowering.h b/llvm/lib/Target/Xtensa/XtensaFrameLowering.h new file mode 100644 index 0000000000000..647c3bea7e3bc --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaFrameLowering.h @@ -0,0 +1,53 @@ +//===- XtensaFrameLowering.h - Define frame lowering for Xtensa -*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------==// + +#ifndef LLVM_LIB_TARGET_XTENSA_XTENSAFRAMELOWERING_H +#define LLVM_LIB_TARGET_XTENSA_XTENSAFRAMELOWERING_H + +#include "llvm/CodeGen/TargetFrameLowering.h" + +namespace llvm { +class XtensaTargetMachine; +class XtensaSubtarget; + +class XtensaFrameLowering : public TargetFrameLowering { +public: + XtensaFrameLowering(); + + bool hasFP(const MachineFunction &MF) const override; + + /// emitProlog/emitEpilog - These methods insert prolog and epilog code into + /// the function. + void emitPrologue(MachineFunction &, MachineBasicBlock &) const override; + void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override; + + MachineBasicBlock::iterator + eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, + MachineBasicBlock::iterator I) const override; + + bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + const std::vector &CSI, + const TargetRegisterInfo *TRI) const override; + bool + restoreCalleeSavedRegisters(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + std::vector &CSI, + const TargetRegisterInfo *TRI) const override; + + void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, + RegScavenger *RS) const override; + + void processFunctionBeforeFrameFinalized(MachineFunction &MF, + RegScavenger *RS) const override; +}; + +} // namespace llvm + +#endif /* LLVM_LIB_TARGET_XTENSA_XTENSAFRAMELOWERING_H */ diff --git a/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp b/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp new file mode 100644 index 0000000000000..596ece43c07b8 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp @@ -0,0 +1,163 @@ +//===- XtensaISelDAGToDAG.cpp - A dag to dag inst selector for Xtensa -----===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. 
See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines an instruction selector for the Xtensa target. +// +//===----------------------------------------------------------------------===// + +#include "Xtensa.h" +#include "XtensaTargetMachine.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/SelectionDAGISel.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" + +using namespace llvm; + +#define DEBUG_TYPE "xtensa-isel" + +namespace { + +class XtensaDAGToDAGISel : public SelectionDAGISel { + const XtensaSubtarget *Subtarget; + +public: + XtensaDAGToDAGISel(XtensaTargetMachine &TM, CodeGenOpt::Level OptLevel) + : SelectionDAGISel(TM, OptLevel), Subtarget(TM.getSubtargetImpl()) {} + + // Override MachineFunctionPass. + StringRef getPassName() const override { + return "Xtensa DAG->DAG Pattern Instruction Selection"; + } + + // Override SelectionDAGISel. + void Select(SDNode *Node) override; + + bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID, + std::vector &OutOps) override; + + bool selectMemRegAddr(SDValue Addr, SDValue &Base, SDValue &Offset, + int Scale) { + EVT ValTy = Addr.getValueType(); + + // if Address is FI, get the TargetFrameIndex. + if (FrameIndexSDNode *FIN = dyn_cast(Addr)) { + Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy); + Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), ValTy); + + return true; + } + + if (TM.isPositionIndependent()) + report_fatal_error("PIC relocations is not supported"); + + if ((Addr.getOpcode() == ISD::TargetExternalSymbol || + Addr.getOpcode() == ISD::TargetGlobalAddress)) + return false; + + // Addresses of the form FI+const or FI|const + bool Valid = false; + if (CurDAG->isBaseWithConstantOffset(Addr)) { + ConstantSDNode *CN = dyn_cast(Addr.getOperand(1)); + int64_t OffsetVal = CN->getSExtValue(); + + switch (Scale) { + case 1: + Valid = (OffsetVal >= 0 && OffsetVal <= 255); + break; + case 2: + Valid = + (OffsetVal >= 0 && OffsetVal <= 510) && ((OffsetVal & 0x1) == 0); + break; + case 4: + Valid = + (OffsetVal >= 0 && OffsetVal <= 1020) && ((OffsetVal & 0x3) == 0); + break; + default: + break; + } + + if (Valid) { + // If the first operand is a FI, get the TargetFI Node + if (FrameIndexSDNode *FIN = + dyn_cast(Addr.getOperand(0))) + Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy); + else + Base = Addr.getOperand(0); + + Offset = + CurDAG->getTargetConstant(CN->getZExtValue(), SDLoc(Addr), ValTy); + return true; + } + } + + // Last case + Base = Addr; + Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), Addr.getValueType()); + return true; + } + + bool selectMemRegAddrISH1(SDValue Addr, SDValue &Base, SDValue &Offset) { + return selectMemRegAddr(Addr, Base, Offset, 1); + } + + bool selectMemRegAddrISH2(SDValue Addr, SDValue &Base, SDValue &Offset) { + return selectMemRegAddr(Addr, Base, Offset, 2); + } + + bool selectMemRegAddrISH4(SDValue Addr, SDValue &Base, SDValue &Offset) { + return selectMemRegAddr(Addr, Base, Offset, 4); + } + +// Include the pieces autogenerated from the target description. 
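Before the generated matcher is pulled in, a note on the offset test in selectMemRegAddr above: the load/store forms targeted here take an unsigned 8-bit offset scaled by the access size, which is where the 255/510/1020 bounds and the alignment masks come from. A standalone restatement, with the helper name invented for the example:

```cpp
#include <cassert>
#include <cstdint>

// Restatement of the offset check in selectMemRegAddr: an unsigned 8-bit
// offset scaled by the access size, so the reachable range depends on the
// scale and the offset must be a multiple of it.
static bool isLegalOffset(int64_t Off, unsigned Scale) {
  switch (Scale) {
  case 1:
    return Off >= 0 && Off <= 255;
  case 2:
    return Off >= 0 && Off <= 510 && (Off & 0x1) == 0;
  case 4:
    return Off >= 0 && Off <= 1020 && (Off & 0x3) == 0;
  default:
    return false;
  }
}

int main() {
  assert(isLegalOffset(1020, 4));  // e.g. a 32-bit access reaches 255 * 4
  assert(!isLegalOffset(1021, 4)); // out of range and misaligned
  assert(isLegalOffset(255, 1));
  assert(!isLegalOffset(511, 2));
  return 0;
}
```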
+#include "XtensaGenDAGISel.inc" +}; // namespace +} // end anonymous namespace + +FunctionPass *llvm::createXtensaISelDag(XtensaTargetMachine &TM, + CodeGenOpt::Level OptLevel) { + return new XtensaDAGToDAGISel(TM, OptLevel); +} + +void XtensaDAGToDAGISel::Select(SDNode *Node) { + SDLoc DL(Node); + // Dump information about the Node being selected + LLVM_DEBUG(errs() << "Selecting: "; Node->dump(CurDAG); errs() << "\n"); + + // If we have a custom node, we already have selected! + if (Node->isMachineOpcode()) { + LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n"); + return; + } + + SelectCode(Node); +} + +bool XtensaDAGToDAGISel::SelectInlineAsmMemoryOperand( + const SDValue &Op, unsigned ConstraintID, std::vector &OutOps) { + switch (ConstraintID) { + default: + llvm_unreachable("Unexpected asm memory constraint"); + case InlineAsm::Constraint_m: { + SDValue Base, Offset; + // TODO + selectMemRegAddr(Op, Base, Offset, 4); + OutOps.push_back(Base); + OutOps.push_back(Offset); + return false; + } + case InlineAsm::Constraint_i: + case InlineAsm::Constraint_R: + case InlineAsm::Constraint_ZC: + OutOps.push_back(Op); + return false; + } + return false; +} diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp new file mode 100644 index 0000000000000..65593dbfb65a5 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -0,0 +1,2785 @@ +//===- XtensaISelLowering.cpp - Xtensa DAG Lowering Implementation --------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the interfaces that Xtensa uses to lower LLVM code into a +// selection DAG. +// +//===----------------------------------------------------------------------===// + +#include "XtensaISelLowering.h" +#include "MCTargetDesc/XtensaBaseInfo.h" +//#include "XtensaCallingConv.h" +#include "XtensaConstantPoolValue.h" +#include "XtensaMachineFunctionInfo.h" +#include "XtensaSubtarget.h" +#include "XtensaTargetMachine.h" +#include "XtensaTargetObjectFile.h" +#include "llvm/CodeGen/CallingConvLower.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineJumpTableInfo.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +#include + +using namespace llvm; + +#define DEBUG_TYPE "xtensa-lower" + +static const MCPhysReg XtensaArgRegs[6] = {Xtensa::A2, Xtensa::A3, Xtensa::A4, + Xtensa::A5, Xtensa::A6, Xtensa::A7}; + +// Return true if we must use long (in fact, indirect) function call. +// It's simplified version, production implimentation must +// resolve a functions in ROM (usually glibc functions) +static bool isLongCall(const char *str) { + // Currently always use long calls + return true; +} + +// The calling conventions in XtensaCallingConv.td are described in terms of the +// callee's register window. This function translates registers to the +// corresponding caller window %o register. 
+static unsigned toCallerWindow(unsigned Reg) { + if (Reg >= Xtensa::A2 && Reg <= Xtensa::A7) + return Reg - Xtensa::A2 + Xtensa::A10; + return Reg; +} + +XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &tm, + const XtensaSubtarget &STI) + : TargetLowering(tm), Subtarget(STI) { + MVT PtrVT = MVT::i32; + // Set up the register classes. + addRegisterClass(MVT::i32, &Xtensa::ARRegClass); + if (Subtarget.hasSingleFloat()) { + addRegisterClass(MVT::f32, &Xtensa::FPRRegClass); + } + // addRegisterClass(MVT::i32, &Xtensa::URRegClass); + + // Set up special registers. + setStackPointerRegisterToSaveRestore(Xtensa::SP); + + setSchedulingPreference(Sched::RegPressure); + + // For i1 types all bits are zero except bit 0 + setBooleanContents(ZeroOrOneBooleanContent); + setBooleanVectorContents( + ZeroOrOneBooleanContent); // vectors of i1s are the same + + // Used by legalize types to correctly generate the setcc result. + // AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32); + setOperationPromotedToType(ISD::SETCC, MVT::i1, MVT::i32); + setOperationPromotedToType(ISD::BR_CC, MVT::i1, MVT::i32); + + setMinFunctionAlignment(Align(4)); + + setOperationAction(ISD::BR_CC, MVT::i32, Legal); + setOperationAction(ISD::BR_CC, MVT::i64, Expand); + if (Subtarget.hasSingleFloat()) + setOperationAction(ISD::BR_CC, MVT::f32, Custom); + else + setOperationAction(ISD::BR_CC, MVT::f32, Expand); + + setOperationAction(ISD::SELECT, MVT::i32, Expand); + setOperationAction(ISD::SELECT, MVT::i64, Expand); + setOperationAction(ISD::SELECT, MVT::f32, Expand); + + setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); + setOperationAction(ISD::SELECT_CC, MVT::i64, Expand); + if (Subtarget.hasSingleFloat()) + setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); + else + setOperationAction(ISD::SELECT_CC, MVT::f32, Expand); + + setOperationAction(ISD::SETCC, MVT::i32, + Custom /* Legal */); // folds into brcond + setOperationAction(ISD::SETCC, MVT::i64, Expand); + if (Subtarget.hasSingleFloat()) + setOperationAction(ISD::SETCC, MVT::f32, Custom); + else + setOperationAction(ISD::SETCC, MVT::f32, Expand); + + setOperationAction(ISD::Constant, MVT::i32, Custom); + setOperationAction(ISD::Constant, MVT::i64, Expand /*Custom */); + setOperationAction(ISD::ConstantFP, MVT::f32, Custom); + + // Expand jump table branches as address arithmetic followed by an + // indirect jump. + setOperationAction(ISD::BR_JT, MVT::Other, Custom); + // Xtensa also does not have indirect branch so expand them + setOperationAction(ISD::BRIND, MVT::Other, Expand); + + // make BRCOND legal, its actually only legal for a subset of conds + setOperationAction(ISD::BRCOND, MVT::Other, Legal); + + // Custom Lower Overflow operators + + // Handle integer types. 
+ for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE; + I <= MVT::LAST_INTEGER_VALUETYPE; ++I) { + MVT VT = MVT::SimpleValueType(I); + if (isTypeLegal(VT)) { + // No support at all + setOperationAction(ISD::SDIVREM, VT, Expand); + setOperationAction(ISD::UDIVREM, VT, Expand); + + setOperationAction(ISD::ATOMIC_SWAP, VT, Expand); + } + } + + if (Subtarget.hasMul32()) + setOperationAction(ISD::MUL, MVT::i32, Legal); + else + setOperationAction(ISD::MUL, MVT::i32, Expand); + + if (Subtarget.hasMul32High()) { + setOperationAction(ISD::MULHU, MVT::i32, Legal); + setOperationAction(ISD::MULHS, MVT::i32, Legal); + } else { + setOperationAction(ISD::MULHU, MVT::i32, Expand); + setOperationAction(ISD::MULHS, MVT::i32, Expand); + } + setOperationAction(ISD::MUL, MVT::i64, Expand); + setOperationAction(ISD::MULHS, MVT::i64, Expand); + setOperationAction(ISD::MULHU, MVT::i64, Expand); + + if (Subtarget.hasDiv32()) { + setOperationAction(ISD::SDIV, MVT::i32, Legal); + setOperationAction(ISD::UDIV, MVT::i32, Legal); + setOperationAction(ISD::SREM, MVT::i32, Legal); + setOperationAction(ISD::UREM, MVT::i32, Legal); + } else { + setOperationAction(ISD::SDIV, MVT::i32, Expand); + setOperationAction(ISD::UDIV, MVT::i32, Expand); + setOperationAction(ISD::SREM, MVT::i32, Expand); + setOperationAction(ISD::UREM, MVT::i32, Expand); + } + setOperationAction(ISD::SDIV, MVT::i64, Expand); + setOperationAction(ISD::UDIV, MVT::i64, Expand); + setOperationAction(ISD::SREM, MVT::i64, Expand); + setOperationAction(ISD::UREM, MVT::i64, Expand); + + // Xtensa doesn't support [ADD,SUB][E,C] + setOperationAction(ISD::ADDC, MVT::i32, Expand); + setOperationAction(ISD::ADDE, MVT::i32, Expand); + setOperationAction(ISD::SUBC, MVT::i32, Expand); + setOperationAction(ISD::SUBE, MVT::i32, Expand); + + setOperationAction(ISD::ADD, MVT::i64, Expand); + setOperationAction(ISD::SUB, MVT::i64, Expand); + setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); + setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); + + // Xtensa doesn't support s[hl,rl,ra]_parts + // TODO + setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); + setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); + setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); + + // Bit Manipulation + setOperationAction(ISD::CTPOP, MVT::i32, Expand); + // Xtensa doesn't support s[hl,rl,ra]_parts + setOperationAction(ISD::ROTL, MVT::i32, Expand); + setOperationAction(ISD::ROTR, MVT::i32, Expand); + // No special instructions for these. 
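On the SHL_PARTS/SRA_PARTS/SRL_PARTS operations marked Custom a few lines above: there is no native 64-bit shift, so the custom lowering has to assemble the result from two 32-bit halves. The standalone sketch below shows the value being computed for the left-shift case only; it models the semantics, not the exact DAG nodes the lowering emits:

```cpp
#include <cassert>
#include <cstdint>

// What a SHL_PARTS expansion has to compute: a 64-bit left shift on a value
// held as {Lo, Hi} 32-bit halves, for shift amounts 0..63.
static void shlParts(uint32_t Lo, uint32_t Hi, unsigned Amt, uint32_t &OutLo,
                     uint32_t &OutHi) {
  if (Amt == 0) {
    OutLo = Lo;
    OutHi = Hi;
  } else if (Amt < 32) {
    OutLo = Lo << Amt;
    OutHi = (Hi << Amt) | (Lo >> (32 - Amt)); // bits carried across halves
  } else {
    OutLo = 0;
    OutHi = Lo << (Amt - 32);
  }
}

int main() {
  uint32_t Lo, Hi;
  shlParts(0x80000001u, 0x0u, 1, Lo, Hi);
  assert(Lo == 0x00000002u && Hi == 0x00000001u);
  shlParts(0x1u, 0x0u, 40, Lo, Hi);
  assert(Lo == 0x0u && Hi == 0x100u);
  return 0;
}
```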
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand); + setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand); + + setOperationAction(ISD::TRAP, MVT::Other, Legal); + + setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); + setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); + setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); + setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); + + // No sign extend instructions for i1 + for (MVT VT : MVT::integer_valuetypes()) { + setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); + setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote); + setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote); + } + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand); + + setOperationAction(ISD::BSWAP, MVT::i32, Expand); + setOperationAction(ISD::BSWAP, MVT::i64, Expand); + setOperationAction(ISD::CTPOP, MVT::i32, Expand); + setOperationAction(ISD::CTTZ, MVT::i32, Expand); + setOperationAction(ISD::CTLZ, MVT::i32, Expand); + setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand); + setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand); + + // Handle the various types of symbolic address. + setOperationAction(ISD::ConstantPool, PtrVT, Custom); + setOperationAction(ISD::GlobalAddress, PtrVT, Custom); + setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom); + setOperationAction(ISD::BlockAddress, PtrVT, Custom); + setOperationAction(ISD::JumpTable, PtrVT, Custom); + + // Expand stack allocations + setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom /* Expand */); + + // Use custom expanders so that we can force the function to use + // a frame pointer. + // TODO: real comment + setOperationAction(ISD::STACKSAVE, MVT::Other, Custom); + setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom); + setOperationAction(ISD::FRAMEADDR, MVT::Other, Custom); + + // Handle floating-point types. + // TODO + for (unsigned I = MVT::FIRST_FP_VALUETYPE; I <= MVT::LAST_FP_VALUETYPE; ++I) { + MVT VT = MVT::SimpleValueType(I); + if (isTypeLegal(VT)) { + // We can use FI for FRINT. + // setOperationAction(ISD::FRINT, VT, Legal); + if (VT.getSizeInBits() == 32 && Subtarget.hasSingleFloat()) { + setOperationAction(ISD::FADD, VT, Legal); + setOperationAction(ISD::FSUB, VT, Legal); + setOperationAction(ISD::FMUL, VT, Legal); + setOperationAction(ISD::FDIV, VT, Expand); + } else { + setOperationAction(ISD::FADD, VT, Expand); + setOperationAction(ISD::FSUB, VT, Expand); + setOperationAction(ISD::FMUL, VT, Expand); + setOperationAction(ISD::FDIV, VT, Expand); + } + + // TODO: once implemented in InstrInfo uncomment + setOperationAction(ISD::FSQRT, VT, Expand); + + // No special instructions for these. + setOperationAction(ISD::FSIN, VT, Expand); + setOperationAction(ISD::FCOS, VT, Expand); + setOperationAction(ISD::FREM, VT, Expand); + setOperationAction(ISD::FABS, VT, Expand); + } + } + + // Handle floating-point types. 
+ if (Subtarget.hasSingleFloat()) { + setOperationAction(ISD::FMA, MVT::f32, Legal); + setOperationAction(ISD::BITCAST, MVT::i32, Legal); + setOperationAction(ISD::BITCAST, MVT::f32, Legal); + setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal); + setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal); + setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal); + setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal); + setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); + } else { + setOperationAction(ISD::FMA, MVT::f32, Expand); + setOperationAction(ISD::SETCC, MVT::f32, Expand); + setOperationAction(ISD::BITCAST, MVT::i32, Expand); + setOperationAction(ISD::BITCAST, MVT::f32, Expand); + setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand); + setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand); + setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand); + setOperationAction(ISD::FP_TO_SINT, MVT::i32, Expand); + setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand); + setOperationAction(ISD::SINT_TO_FP, MVT::i64, Expand); + setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand); + setOperationAction(ISD::FP_TO_SINT, MVT::i64, Expand); + } + setOperationAction(ISD::FMA, MVT::f64, Expand); + setOperationAction(ISD::SETCC, MVT::f64, Expand); + setOperationAction(ISD::BITCAST, MVT::i64, Expand); + setOperationAction(ISD::BITCAST, MVT::f64, Expand); + + // Needed so that we don't try to implement f128 constant loads using + // a load-and-extend of a f80 constant (in cases where the constant + // would fit in an f80). + for (MVT VT : MVT::fp_valuetypes()) + setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand); + + // Floating-point truncation and stores need to be done separately. + setTruncStoreAction(MVT::f64, MVT::f32, Expand); + + // We have 64-bit FPR<->GPR moves, but need special handling for + // 32-bit forms. + + // VASTART and VACOPY need to deal with the Xtensa-specific varargs + // structure, but VAEND is a no-op. 
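On the varargs operations configured just below: the va_list here is not a single pointer but three 32-bit words (getVaListSizeInBits further down reports 3 * 4 bytes), conventionally a pointer to the overflow area on the caller's stack, a pointer to the spilled a2..a7 registers, and a running index that selects between the two. That is why VASTART, VAARG and VACOPY all need custom lowering. A sketch of that shape only; the field roles follow the usual GCC/Xtensa convention and are an assumption of this note:

```cpp
#include <cstdint>

// Shape of the three-word Xtensa va_list manipulated by the custom VASTART /
// VAARG / VACOPY lowering. 32-bit integers stand in for the two pointers so
// sizeof() is host-independent in this sketch.
struct XtensaVaListModel {
  uint32_t OverflowAreaPtr; // arguments spilled to the caller's stack
  uint32_t RegSaveAreaPtr;  // saved a2..a7 argument registers
  int32_t Index;            // running position across both areas
};

static_assert(sizeof(XtensaVaListModel) == 12,
              "matches getVaListSizeInBits(): 2 * sizeof(int *) + sizeof(int) "
              "on the 32-bit target");

int main() { return 0; }
```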
+ setOperationAction(ISD::VASTART, MVT::Other, Custom); + // we use special va_list structure so we have to customize this + setOperationAction(ISD::VAARG, MVT::Other, Custom); + setOperationAction(ISD::VACOPY, MVT::Other, Custom); + + setOperationAction(ISD::VAEND, MVT::Other, Expand); + + // to have the best chance and doing something good with fences custom lower + // them + setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); + + if (!Subtarget.hasS32C1I()) { + for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE; + I <= MVT::LAST_INTEGER_VALUETYPE; ++I) { + MVT VT = MVT::SimpleValueType(I); + if (isTypeLegal(VT)) { + setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_NAND, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_MIN, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_MAX, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_UMIN, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_UMAX, VT, Expand); + } + } + } + + if (Subtarget.hasSingleFloat()) { + setCondCodeAction(ISD::SETOGT, MVT::f32, Expand); + setCondCodeAction(ISD::SETOGE, MVT::f32, Expand); + setCondCodeAction(ISD::SETONE, MVT::f32, Expand); + setCondCodeAction(ISD::SETUGE, MVT::f32, Expand); + setCondCodeAction(ISD::SETUGT, MVT::f32, Expand); + + setTargetDAGCombine(ISD::FADD); + setTargetDAGCombine(ISD::FSUB); + setTargetDAGCombine(ISD::BRCOND); + } + + // Compute derived properties from the register classes + computeRegisterProperties(STI.getRegisterInfo()); + + if (Subtarget.hasBoolean()) { + addRegisterClass(MVT::i1, &Xtensa::BRRegClass); + } +} + +/// If a physical register, this returns the register that receives the +/// exception address on entry to an EH pad. +unsigned XtensaTargetLowering::getExceptionPointerRegister( + const Constant *PersonalityFn) const { + return Xtensa::A2; +} + +/// If a physical register, this returns the register that receives the +/// exception typeid on entry to a landing pad. +unsigned XtensaTargetLowering::getExceptionSelectorRegister( + const Constant *PersonalityFn) const { + return Xtensa::A3; +} + +bool XtensaTargetLowering::isOffsetFoldingLegal( + const GlobalAddressSDNode *GA) const { + // The Xtensa target isn't yet aware of offsets. 
+ return false; +} + +bool XtensaTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, + bool ForCodeSize) const { + return false; +} + +unsigned XtensaTargetLowering::getVaListSizeInBits(const DataLayout &DL) const { + // 2 * sizeof(int*) + sizeof(int) + return 3 * 4; +} + +//===----------------------------------------------------------------------===// +// Inline asm support +//===----------------------------------------------------------------------===// +TargetLowering::ConstraintType +XtensaTargetLowering::getConstraintType(StringRef Constraint) const { + if (Constraint.size() == 1) { + switch (Constraint[0]) { + case 'a': + case 'd': + case 'f': + case 'r': + return C_RegisterClass; + + default: + break; + } + } + return TargetLowering::getConstraintType(Constraint); +} + +TargetLowering::ConstraintWeight +XtensaTargetLowering::getSingleConstraintMatchWeight( + AsmOperandInfo &info, const char *constraint) const { + ConstraintWeight weight = CW_Invalid; + Value *CallOperandVal = info.CallOperandVal; + // If we don't have a value, we can't do a match, + // but allow it at the lowest weight. + if (CallOperandVal == NULL) + return CW_Default; + Type *type = CallOperandVal->getType(); + // Look at the constraint type. + switch (*constraint) { + default: + weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); + break; + + case 'a': + case 'd': + case 'r': + if (CallOperandVal->getType()->isIntegerTy()) + weight = CW_Register; + break; + + case 'f': + if (type->isFloatingPointTy()) + weight = CW_Register; + break; + } + return weight; +} + +std::pair +XtensaTargetLowering::getRegForInlineAsmConstraint( + const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const { + if (Constraint.size() == 1) { + // GCC Constraint Letters + switch (Constraint[0]) { + default: + break; + case 'a': // Address register + case 'd': // Data register (equivalent to 'r') + case 'r': // General-purpose register + return std::make_pair(0U, &Xtensa::ARRegClass); + + case 'f': // Floating-point register + if (Subtarget.hasSingleFloat()) + return std::make_pair( + 0U, &Xtensa::ARRegClass /* TODO Xtensa::FP32BitRegClass */); + return std::make_pair(0U, &Xtensa::ARRegClass); + } + } + return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); +} + +/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops +/// vector. If it is invalid, don't add anything to Ops. +void XtensaTargetLowering::LowerAsmOperandForConstraint( + SDValue Op, std::string &Constraint, std::vector &Ops, + SelectionDAG &DAG) const { + SDLoc DL(Op); + + // Only support length 1 constraints for now. + if (Constraint.length() > 1) + return; + + TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); +} +//===----------------------------------------------------------------------===// +// DAG Combine functions +//===----------------------------------------------------------------------===// +static SDValue performMADD_MSUBCombine(SDNode *ROOTNode, SelectionDAG &CurDAG, + const XtensaSubtarget &Subtarget) { + if (ROOTNode->getOperand(0).getValueType() != MVT::f32) + return SDValue(); + + if (ROOTNode->getOperand(0).getOpcode() != ISD::FMUL && + ROOTNode->getOperand(1).getOpcode() != ISD::FMUL) + return SDValue(); + + SDValue Mult = ROOTNode->getOperand(0).getOpcode() == ISD::FMUL + ? ROOTNode->getOperand(0) + : ROOTNode->getOperand(1); + + SDValue AddOperand = ROOTNode->getOperand(0).getOpcode() == ISD::FMUL + ? 
ROOTNode->getOperand(1) + : ROOTNode->getOperand(0); + + if (!Mult.hasOneUse()) + return SDValue(); + + SDLoc DL(ROOTNode); + + bool IsAdd = ROOTNode->getOpcode() == ISD::FADD; + unsigned Opcode = IsAdd ? XtensaISD::MADD : XtensaISD::MSUB; + SDValue MAddOps[3] = {AddOperand, Mult->getOperand(0), Mult->getOperand(1)}; + EVT VTs[3] = {MVT::f32, MVT::f32, MVT::f32}; + SDValue MAdd = CurDAG.getNode(Opcode, DL, VTs, MAddOps); + + return MAdd; +} + +static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const XtensaSubtarget &Subtarget) { + if (DCI.isBeforeLegalizeOps()) { + if (Subtarget.hasSingleFloat() && N->getValueType(0) == MVT::f32) + return performMADD_MSUBCombine(N, DAG, Subtarget); + } + return SDValue(); +} + +static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const XtensaSubtarget &Subtarget) { + if (DCI.isBeforeLegalizeOps()) { + if (Subtarget.hasSingleFloat() && N->getValueType(0) == MVT::f32) + return performMADD_MSUBCombine(N, DAG, Subtarget); + } + return SDValue(); +} + +static SDValue performBRCONDCombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const XtensaSubtarget &Subtarget) { + if (DCI.isBeforeLegalizeOps()) { + SDValue Chain = N->getOperand(0); + + if (N->getOperand(1).getOpcode() != ISD::SETCC) + return SDValue(); + + SDLoc DL(N); + SDValue SetCC = N->getOperand(1); + SDValue Dest = N->getOperand(2); + ISD::CondCode CC = cast(SetCC->getOperand(2))->get(); + SDValue LHS = SetCC->getOperand(0); + SDValue RHS = SetCC->getOperand(1); + + if (LHS.getValueType() != MVT::i32) + return SDValue(); + + return DAG.getNode(ISD::BR_CC, DL, MVT::isVoid, Chain, DAG.getCondCode(CC), + LHS, RHS, Dest); + } + return SDValue(); +} + +SDValue XtensaTargetLowering::PerformDAGCombine(SDNode *N, + DAGCombinerInfo &DCI) const { + SelectionDAG &DAG = DCI.DAG; + unsigned Opc = N->getOpcode(); + + switch (Opc) { + default: + break; + case ISD::FADD: + return performADDCombine(N, DAG, DCI, Subtarget); + case ISD::FSUB: + return performSUBCombine(N, DAG, DCI, Subtarget); + case ISD::BRCOND: + return performBRCONDCombine(N, DAG, DCI, Subtarget); + } + + return SDValue(); +} + +//===----------------------------------------------------------------------===// +// Lower helper functions +//===----------------------------------------------------------------------===// +#if 0 +// addLiveIn - This helper function adds the specified physical register to +// the MachineFunction as a live in value. It also creates a corresponding +// virtual register for it. +static unsigned addLiveIn(MachineFunction &MF, unsigned PReg, + const TargetRegisterClass *RC) { + unsigned VReg = MF.getRegInfo().createVirtualRegister(RC); + MF.getRegInfo().addLiveIn(PReg, VReg); + return VReg; +} +#endif +//===----------------------------------------------------------------------===// +// Calling conventions +//===----------------------------------------------------------------------===// + +#include "XtensaGenCallingConv.inc" + +static bool CC_Xtensa_Custom(unsigned ValNo, MVT ValVT, MVT LocVT, + CCValAssign::LocInfo LocInfo, + ISD::ArgFlagsTy ArgFlags, CCState &State) { + static const MCPhysReg IntRegs[] = {Xtensa::A2, Xtensa::A3, Xtensa::A4, + Xtensa::A5, Xtensa::A6, Xtensa::A7}; + + // Do not process byval args here. 
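The custom assigner that follows hands scalar arguments to a2..a7 and forces 64-bit values to start in an even-numbered register, skipping one register when needed; anything that no longer fits goes to the stack. Below is a deliberately simplified standalone model of just that rule (it ignores byval, f64 shadowing and the i8/i16 promotion that the real hook handles); for example, f(int, long long, int) ends up as a2, a4/a5 and a6:

```cpp
#include <cassert>
#include <vector>

// Simplified model of the even-register rule in CC_Xtensa_Custom. Each entry
// of SizesInWords is an argument size in 4-byte words; the result is the
// first register assigned to it (2 = a2, ...), or -1 for the stack.
static std::vector<int> assignArgs(const std::vector<int> &SizesInWords) {
  std::vector<int> FirstReg;
  int Next = 2; // a2 is the first argument register
  for (int Words : SizesInWords) {
    if (Words == 2 && (Next & 1)) // 64-bit values start in a2/a4/a6
      ++Next;
    if (Next + Words - 1 > 7) { // past a7: passed on the stack
      FirstReg.push_back(-1);
      continue;
    }
    FirstReg.push_back(Next);
    Next += Words;
  }
  return FirstReg;
}

int main() {
  // f(int, long long, int): the i64 skips a3 and lands in a4/a5, the last
  // int then takes a6.
  std::vector<int> R = assignArgs({1, 2, 1});
  assert(R[0] == 2 && R[1] == 4 && R[2] == 6);
  return 0;
}
```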
+ if (ArgFlags.isByVal()) + return true; + + // Promote i8 and i16 + if (LocVT == MVT::i8 || LocVT == MVT::i16) { + LocVT = MVT::i32; + if (ArgFlags.isSExt()) + LocInfo = CCValAssign::SExt; + else if (ArgFlags.isZExt()) + LocInfo = CCValAssign::ZExt; + else + LocInfo = CCValAssign::AExt; + } + + unsigned Reg; + + unsigned OrigAlign = ArgFlags.getOrigAlign(); + bool isI64 = (ValVT == MVT::i32 && OrigAlign == 8); + + if (ValVT == MVT::i32 || ValVT == MVT::f32) { + Reg = State.AllocateReg(IntRegs); + // If this is the first part of an i64 arg, + // the allocated register must be either A0 or A2. + if (isI64 && (Reg == Xtensa::A3 || Reg == Xtensa::A5 || Reg == Xtensa::A7)) + Reg = State.AllocateReg(IntRegs); + LocVT = MVT::i32; + } else if (ValVT == MVT::f64) { + // Allocate int register and shadow next int register. + Reg = State.AllocateReg(IntRegs); + if (Reg == Xtensa::A3 || Reg == Xtensa::A5 || Reg == Xtensa::A7) + Reg = State.AllocateReg(IntRegs); + State.AllocateReg(IntRegs); + LocVT = MVT::i32; + } else + llvm_unreachable("Cannot handle this ValVT."); + + if (!Reg) { + unsigned Offset = State.AllocateStack(ValVT.getStoreSize(), OrigAlign); + State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo)); + } else + State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); + + return false; +} + +CCAssignFn *XtensaTargetLowering::CCAssignFnForCall(CallingConv::ID CC, + bool isVarArg) const { + // return isVarArg ? CC_Xtensa_VAR : CC_Xtensa; + return CC_Xtensa_Custom; +} + +// Value is a value that has been passed to us in the location described by VA +// (and so has type VA.getLocVT()). Convert Value to VA.getValVT(), chaining +// any loads onto Chain. +static SDValue convertLocVTToValVT(SelectionDAG &DAG, const SDLoc &DL, + CCValAssign &VA, SDValue Chain, + SDValue Value) { + // If the argument has been promoted from a smaller type, insert an + // assertion to capture this. + if (VA.getLocInfo() == CCValAssign::SExt) + Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value, + DAG.getValueType(VA.getValVT())); + else if (VA.getLocInfo() == CCValAssign::ZExt) + Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value, + DAG.getValueType(VA.getValVT())); + + if (VA.isExtInLoc()) + Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value); + else if (VA.getLocInfo() == CCValAssign::Indirect) + Value = DAG.getLoad(VA.getValVT(), DL, Chain, Value, MachinePointerInfo()); + else if (VA.getValVT() == MVT::f32) + Value = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Value); + else + assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo"); + return Value; +} + +// Value is a value of type VA.getValVT() that we need to copy into +// the location described by VA. Return a copy of Value converted to +// VA.getValVT(). The caller is responsible for handling indirect values. 
+static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDLoc DL, CCValAssign &VA, + SDValue Value) { + switch (VA.getLocInfo()) { + case CCValAssign::SExt: + return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value); + case CCValAssign::ZExt: + return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value); + case CCValAssign::AExt: + return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value); + case CCValAssign::BCvt: + return DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Value); + case CCValAssign::Full: + return Value; + default: + llvm_unreachable("Unhandled getLocInfo()"); + } +} + +SDValue XtensaTargetLowering::LowerFormalArguments( + SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, + const SmallVectorImpl &Ins, const SDLoc &DL, + SelectionDAG &DAG, SmallVectorImpl &InVals) const { + MachineFunction &MF = DAG.getMachineFunction(); + MachineFrameInfo &MFI = MF.getFrameInfo(); + XtensaFunctionInfo *XtensaFI = MF.getInfo(); + EVT PtrVT = getPointerTy(MF.getDataLayout()); + + // errs() >> "function " << MF.getName() << "\n"; + + XtensaFI->setVarArgsFrameIndex(0); + + // Used with vargs to acumulate store chains. + std::vector OutChains; + + // Assign locations to all of the incoming arguments. + SmallVector ArgLocs; + CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs, + *DAG.getContext()); + + CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, IsVarArg)); + + for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { + CCValAssign &VA = ArgLocs[i]; + // Arguments stored on registers + if (VA.isRegLoc()) { + EVT RegVT = VA.getLocVT(); + const TargetRegisterClass *RC; + + if (RegVT == MVT::i32) { + RC = &Xtensa::ARRegClass; + } else + llvm_unreachable("RegVT not supported by FormalArguments Lowering"); + + // Transform the arguments stored on + // physical registers into virtual ones + unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); + SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT); + + // If this is an 8 or 16-bit value, it has been passed promoted + // to 32 bits. Insert an assert[sz]ext to capture this, then + // truncate to the right size. + if (VA.getLocInfo() != CCValAssign::Full) { + unsigned Opcode = 0; + if (VA.getLocInfo() == CCValAssign::SExt) + Opcode = ISD::AssertSext; + else if (VA.getLocInfo() == CCValAssign::ZExt) + Opcode = ISD::AssertZext; + if (Opcode) + ArgValue = DAG.getNode(Opcode, DL, RegVT, ArgValue, + DAG.getValueType(VA.getValVT())); + if (VA.getValVT() == MVT::f32) + ArgValue = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), ArgValue); + else + ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue); + } + + InVals.push_back(ArgValue); + + } else { // !VA.isRegLoc() + // sanity check + assert(VA.isMemLoc()); + + EVT ValVT = VA.getValVT(); + + // The stack pointer offset is relative to the caller stack frame. 
+ int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8, + VA.getLocMemOffset(), true); + + // Create load nodes to retrieve arguments from the stack + SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); + InVals.push_back(DAG.getLoad( + ValVT, DL, Chain, FIN, + MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI))); + } + } + + if (IsVarArg) { + ArrayRef ArgRegs = makeArrayRef(XtensaArgRegs); + unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs); + const TargetRegisterClass *RC = &Xtensa::ARRegClass; + MachineFrameInfo &MFI = MF.getFrameInfo(); + MachineRegisterInfo &RegInfo = MF.getRegInfo(); + unsigned RegSize = 4; + MVT RegTy = MVT::getIntegerVT(RegSize * 8); + + XtensaFI->setVarArgsFirstGPR(Idx + 2); // 2 - number of a2 register + + XtensaFI->setVarArgsStackOffset(MFI.CreateFixedObject( + PtrVT.getSizeInBits() / 8, CCInfo.getNextStackOffset(), true)); + + // Offset of the first variable argument from stack pointer, and size of + // the vararg save area. For now, the varargs save area is either zero or + // large enough to hold a0-a7. + int VaArgOffset, VarArgsSaveSize; + + // If all registers are allocated, then all varargs must be passed on the + // stack and we don't need to save any argregs. + if (ArgRegs.size() == Idx) { + VaArgOffset = CCInfo.getNextStackOffset(); + VarArgsSaveSize = 0; + } else { + VarArgsSaveSize = RegSize * (ArgRegs.size() - Idx); + VaArgOffset = -VarArgsSaveSize; + } + + // Record the frame index of the first variable argument + // which is a value necessary to VASTART. + int FI = MFI.CreateFixedObject(RegSize, VaArgOffset, true); + XtensaFI->setVarArgsFrameIndex(FI); + + // Copy the integer registers that may have been used for passing varargs + // to the vararg save area. + for (unsigned I = Idx; I < ArgRegs.size(); ++I, VaArgOffset += RegSize) { + const unsigned Reg = RegInfo.createVirtualRegister(RC); + RegInfo.addLiveIn(ArgRegs[I], Reg); + SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegTy); + FI = MFI.CreateFixedObject(RegSize, VaArgOffset, true); + SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); + SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff, + MachinePointerInfo::getFixedStack(MF, FI)); + cast(Store.getNode()) + ->getMemOperand() + ->setValue((Value *)nullptr); + OutChains.push_back(Store); + } + } + + // All stores are grouped in one node to allow the matching between + // the size of Ins and InVals. 
This only happens when on varg functions + if (!OutChains.empty()) { + OutChains.push_back(Chain); + Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains); + } + + return Chain; +} + +SDValue XtensaTargetLowering::getTargetNode(SDValue Op, SelectionDAG &DAG, + unsigned Flag) const { + EVT Ty = getPointerTy(DAG.getDataLayout()); + + if (GlobalAddressSDNode *N = dyn_cast(Op)) + return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(Op), Ty, 0, Flag); + if (ExternalSymbolSDNode *N = dyn_cast(Op)) + return DAG.getTargetExternalSymbol(N->getSymbol(), Ty, Flag); + if (BlockAddressSDNode *N = dyn_cast(Op)) + return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag); + if (JumpTableSDNode *N = dyn_cast(Op)) + return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag); + if (ConstantPoolSDNode *N = dyn_cast(Op)) + return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlignment(), + N->getOffset(), Flag); + + llvm_unreachable("Unexpected node type."); + return SDValue(); +} + +SDValue XtensaTargetLowering::getAddrPCRel(SDValue Op, + SelectionDAG &DAG) const { + SDLoc DL(Op); + EVT Ty = Op.getValueType(); + return DAG.getNode(XtensaISD::PCREL_WRAPPER, DL, Ty, Op); +} + +SDValue +XtensaTargetLowering::LowerCall(CallLoweringInfo &CLI, + SmallVectorImpl &InVals) const { + SelectionDAG &DAG = CLI.DAG; + SDLoc &DL = CLI.DL; + SmallVector &Outs = CLI.Outs; + SmallVector &OutVals = CLI.OutVals; + SmallVector &Ins = CLI.Ins; + SDValue Chain = CLI.Chain; + SDValue Callee = CLI.Callee; + bool &isTailCall = CLI.IsTailCall; + CallingConv::ID CallConv = CLI.CallConv; + bool IsVarArg = CLI.IsVarArg; + + MachineFunction &MF = DAG.getMachineFunction(); + EVT PtrVT = getPointerTy(DAG.getDataLayout()); + const TargetFrameLowering *TFL = Subtarget.getFrameLowering(); + + // Xtensa target does not yet support tail call optimization. + isTailCall = false; + + // Analyze the operands of the call, assigning locations to each operand. + SmallVector ArgLocs; + CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); + + CCAssignFn *CC = CCAssignFnForCall(CallConv, IsVarArg); + + CCInfo.AnalyzeCallOperands(Outs, CC); + + // + // Get a count of how many bytes are to be pushed on the stack. + unsigned NumBytes = CCInfo.getNextStackOffset(); + + unsigned StackAlignment = TFL->getStackAlignment(); + unsigned NextStackOffset = alignTo(NumBytes, StackAlignment); + + // Mark the start of the call. + + // TODO + // if (!IsTailCall) + Chain = DAG.getCALLSEQ_START(Chain, NextStackOffset, 0, DL); + + // Copy argument values to their designated locations. + std::deque> RegsToPass; + SmallVector MemOpChains; + SDValue StackPtr; + for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) { + CCValAssign &VA = ArgLocs[I]; + SDValue ArgValue = OutVals[I]; + ISD::ArgFlagsTy Flags = Outs[I].Flags; + + ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue); + + if (VA.isRegLoc()) + // Queue up the argument copies and emit them at the end. + RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue)); + else if (Flags.isByVal()) { + assert(VA.isMemLoc()); + assert(Flags.getByValSize() && + "ByVal args of size 0 should have been ignored by front-end."); + assert(!isTailCall && + "Do not tail-call optimize if there is a byval argument."); + + // True if this byval aggregate will be split between registers + // and memory. 
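// A sketch of the split-byval case handled here, assuming the convention
// assigns byval words to the remaining argument registers:
//
//   struct S { int v[3]; };
//   void g(int a, int b, int c, int d, S s);
//
// With a..d in a2..a5, the 12-byte aggregate s would occupy a6, a7 and one
// stack word; the loop below loads the register-resident words of the byval
// copy and queues them in RegsToPass.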
+ unsigned ByValArgsCount = CCInfo.getInRegsParamsCount(); + unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed(); + if (CurByValIdx < ByValArgsCount) { + unsigned RegBegin, RegEnd; + CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd); + + EVT PtrVT = + DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); + unsigned int i, j; + for (i = 0, j = RegBegin; j < RegEnd; i++, j++) { + SDValue Const = DAG.getConstant( + 4 * i, DL, MVT::i32); // TODO:should this i32 be ptrTy + SDValue AddArg = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Const); + SDValue Load = + DAG.getLoad(PtrVT, DL, Chain, AddArg, MachinePointerInfo(), + DAG.InferPtrAlignment(AddArg)); + MemOpChains.push_back(Load.getValue(1)); + RegsToPass.push_back(std::make_pair(j, Load)); + } + + CCInfo.nextInRegsParam(); + } + + // TODO: Handle byvals partially or entirely not in registers + + } else { + assert(VA.isMemLoc() && "Argument not register or memory"); + + // Work out the address of the stack slot. Unpromoted ints and + // floats are passed as right-justified 8-byte values. + if (!StackPtr.getNode()) + StackPtr = DAG.getCopyFromReg(Chain, DL, Xtensa::SP, PtrVT); + unsigned Offset = VA.getLocMemOffset(); + SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, + DAG.getIntPtrConstant(Offset, DL)); + + // Emit the store. + MemOpChains.push_back( + DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo())); + } + } + + // Join the stores, which are independent of one another. + if (!MemOpChains.empty()) + Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); + + // Build a sequence of copy-to-reg nodes, chained and glued together. + SDValue Glue; + for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) { + unsigned Reg = RegsToPass[I].first; + if (Subtarget.isWinABI()) + Reg = toCallerWindow(Reg); + Chain = DAG.getCopyToReg(Chain, DL, Reg, RegsToPass[I].second, Glue); + Glue = Chain.getValue(1); + } + + // const char *name = 0; + std::string name; + + unsigned char TF = 0; + + // Accept direct calls by converting symbolic call addresses to the + // associated Target* opcodes. + if (ExternalSymbolSDNode *E = dyn_cast(Callee)) { + name = E->getSymbol(); + TF = E->getTargetFlags(); + if (isPositionIndependent()) { + report_fatal_error("PIC relocations is not supported"); + } else + Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF); + } else if (GlobalAddressSDNode *G = dyn_cast(Callee)) { + // TODO replace GlobalAddress to some special operand instead of + // ExternalSymbol + // Callee = + // DAG.getTargetExternalSymbol(strdup(G->getGlobal()->getName().str().c_str()), + // PtrVT); + + const GlobalValue *GV = G->getGlobal(); + name = GV->getName().str(); + } + + if ((!name.empty()) && isLongCall(name.c_str())) { + // Create a constant pool entry for the callee address + XtensaCP::XtensaCPModifier Modifier = XtensaCP::no_modifier; + + XtensaConstantPoolValue *CPV = XtensaConstantPoolSymbol::Create( + *DAG.getContext(), name.c_str(), 0 /* XtensaCLabelIndex */, false, + Modifier); + + // Get the address of the callee into a register + SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4, 0, TF); + SDValue CPWrap = getAddrPCRel(CPAddr, DAG); + Callee = CPWrap; + } + + // The first call operand is the chain and the second is the target address. + SmallVector Ops; + Ops.push_back(Chain); + Ops.push_back(Callee); + + // TODO if (!IsTailCall) + { + // Add a register mask operand representing the call-preserved registers. 
+ const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); + const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv); + assert(Mask && "Missing call preserved mask for calling convention"); + Ops.push_back(DAG.getRegisterMask(Mask)); + } + + // Add argument registers to the end of the list so that they are + // known live into the call. + for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) { + unsigned Reg = RegsToPass[I].first; + if (Subtarget.isWinABI()) + Reg = toCallerWindow(Reg); + Ops.push_back(DAG.getRegister(Reg, RegsToPass[I].second.getValueType())); + } + + // Glue the call to the argument copies, if any. + if (Glue.getNode()) + Ops.push_back(Glue); + + SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); + Chain = DAG.getNode(Subtarget.isWinABI() ? XtensaISD::CALLW : XtensaISD::CALL, + DL, NodeTys, Ops); + Glue = Chain.getValue(1); + + // Mark the end of the call, which is glued to the call itself. + Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, DL, PtrVT, true), + DAG.getConstant(0, DL, PtrVT, true), Glue, DL); + Glue = Chain.getValue(1); + + // Assign locations to each value returned by this call. + SmallVector RetLocs; + CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext()); + RetCCInfo.AnalyzeCallResult(Ins, Subtarget.isWinABI() ? RetCCW_Xtensa + : RetCC_Xtensa); + + // Copy all of the result registers out of their specified physreg. + for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) { + CCValAssign &VA = RetLocs[I]; + + // Copy the value out, gluing the copy to the end of the call sequence. + unsigned Reg = VA.getLocReg(); + // if (Subtarget.isWinABI()) + // Reg = toCallerWindow(Reg); + SDValue RetValue = DAG.getCopyFromReg(Chain, DL, Reg, VA.getLocVT(), Glue); + Chain = RetValue.getValue(1); + Glue = RetValue.getValue(2); + + // Convert the value of the return register into the value that's + // being returned. + InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue)); + } + return Chain; +} + +/// This hook should be implemented to check whether the return values +/// described by the Outs array can fit into the return registers. If false +/// is returned, an sret-demotion is performed. +bool XtensaTargetLowering::CanLowerReturn( + CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg, + const SmallVectorImpl &Outs, LLVMContext &Context) const { + SmallVector RVLocs; + CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); + return CCInfo.CheckReturn(Outs, RetCC_Xtensa); +} + +SDValue +XtensaTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, + bool IsVarArg, + const SmallVectorImpl &Outs, + const SmallVectorImpl &OutVals, + const SDLoc &DL, SelectionDAG &DAG) const { + MachineFunction &MF = DAG.getMachineFunction(); + + // Assign locations to each returned value. + SmallVector RetLocs; + CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext()); + RetCCInfo.AnalyzeReturn(Outs, RetCC_Xtensa); + + SDValue Glue; + // Quick exit for void returns + if (RetLocs.empty()) + return DAG.getNode(Subtarget.isWinABI() ? XtensaISD::RETW_FLAG + : XtensaISD::RET_FLAG, + DL, MVT::Other, Chain); + + // Copy the result values into the output registers. + SmallVector RetOps; + RetOps.push_back(Chain); + for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) { + CCValAssign &VA = RetLocs[I]; + SDValue RetValue = OutVals[I]; + + // Make the return register live on exit. + assert(VA.isRegLoc() && "Can only return in registers!"); + + // Promote the value as required. 
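// For example, a sketch assuming i8/i16 results are promoted to i32 in the
// first return register:
//
//   short min16(short a, short b) { return a < b ? a : b; }
//
// The i16 result is sign-extended to 32 bits by convertValVTToLocVT before
// the CopyToReg below, mirroring the AssertSext/TRUNCATE handling on the
// LowerFormalArguments side.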
+ RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue); + + // Chain and glue the copies together. + unsigned Reg = VA.getLocReg(); + Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue); + Glue = Chain.getValue(1); + RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT())); + } + + // Update chain and glue. + RetOps[0] = Chain; + if (Glue.getNode()) + RetOps.push_back(Glue); + + return DAG.getNode(Subtarget.isWinABI() ? XtensaISD::RETW_FLAG + : XtensaISD::RET_FLAG, + DL, MVT::Other, RetOps); +} + +static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, ISD::CondCode CC, SDLoc dl, + SelectionDAG &DAG, int &br_code) { + // Minor optimization: if LHS is a constant, swap operands, then the + // constant can be folded into comparison. + if (LHS.getOpcode() == ISD::Constant) + std::swap(LHS, RHS); + int cmp_code = 0; + + switch (CC) { + default: + llvm_unreachable("Invalid condition!"); + break; + case ISD::SETUNE: + br_code = XtensaISD::BR_CC_F; + cmp_code = XtensaISD::CMPOEQ; + break; + case ISD::SETUO: + br_code = XtensaISD::BR_CC_T; + cmp_code = XtensaISD::CMPUO; + break; + case ISD::SETO: + br_code = XtensaISD::BR_CC_F; + cmp_code = XtensaISD::CMPUO; + break; + case ISD::SETUEQ: + br_code = XtensaISD::BR_CC_T; + cmp_code = XtensaISD::CMPUEQ; + break; + case ISD::SETULE: + br_code = XtensaISD::BR_CC_T; + cmp_code = XtensaISD::CMPULE; + break; + case ISD::SETULT: + br_code = XtensaISD::BR_CC_T; + cmp_code = XtensaISD::CMPULT; + break; + case ISD::SETEQ: + case ISD::SETOEQ: + br_code = XtensaISD::BR_CC_T; + cmp_code = XtensaISD::CMPOEQ; + break; + case ISD::SETNE: + br_code = XtensaISD::BR_CC_F; + cmp_code = XtensaISD::CMPOEQ; + break; + case ISD::SETLE: + case ISD::SETOLE: + br_code = XtensaISD::BR_CC_T; + cmp_code = XtensaISD::CMPOLE; + break; + case ISD::SETLT: + case ISD::SETOLT: + br_code = XtensaISD::BR_CC_T; + cmp_code = XtensaISD::CMPOLT; + break; + case ISD::SETGE: + br_code = XtensaISD::BR_CC_F; + cmp_code = XtensaISD::CMPOLT; + break; + case ISD::SETGT: + br_code = XtensaISD::BR_CC_F; + cmp_code = XtensaISD::CMPOLE; + break; + } + return DAG.getNode(cmp_code, dl, MVT::i1, LHS, RHS); +} + +SDValue XtensaTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const { + SDValue Chain = Op.getOperand(0); + ISD::CondCode CC = cast(Op.getOperand(1))->get(); + SDValue LHS = Op.getOperand(2); + SDValue RHS = Op.getOperand(3); + SDValue Dest = Op.getOperand(4); + SDLoc DL(Op); + + if (LHS.getValueType() == MVT::f32) { + int br_code; + SDValue Flag = EmitCMP(LHS, RHS, CC, DL, DAG, br_code); + return DAG.getNode(br_code, DL, Op.getValueType(), Chain, Flag, Dest); + } else { // MVT::i32 + SDValue setcc = + DAG.getNode(ISD::SETCC, DL, MVT::i32, LHS, RHS, DAG.getCondCode(CC)); + return DAG.getNode(ISD::BRCOND, DL, Op.getValueType(), Chain, setcc, Dest); + } +} + +SDValue XtensaTargetLowering::lowerSELECT_CC(SDValue Op, + SelectionDAG &DAG) const { + SDLoc DL(Op); + EVT Ty = Op.getOperand(0).getValueType(); + SDValue LHS = Op.getOperand(0); + SDValue RHS = Op.getOperand(1); + SDValue TrueV = Op.getOperand(2); + SDValue FalseV = Op.getOperand(3); + ISD::CondCode CC = cast(Op->getOperand(4))->get(); + SDValue TargetCC = DAG.getConstant(CC, DL, MVT::i32); + + // Wrap select nodes + if (LHS.getValueType() == MVT::f32) + return DAG.getNode(XtensaISD::SELECT_CC_FP, DL, TrueV.getValueType(), LHS, + RHS, TrueV, FalseV, TargetCC); + else if (TrueV.getValueType() == MVT::f32) + return DAG.getNode(XtensaISD::SELECT_CC_FP, DL, TrueV.getValueType(), LHS, + RHS, TrueV, FalseV, TargetCC); + else + return 
DAG.getNode(XtensaISD::SELECT_CC, DL, Ty, LHS, RHS, TrueV, FalseV, + TargetCC); +} + +SDValue XtensaTargetLowering::lowerSETCC(SDValue Op, SelectionDAG &DAG) const { + SDLoc DL(Op); + EVT Ty = Op.getOperand(0).getValueType(); + SDValue LHS = Op.getOperand(0); + SDValue RHS = Op.getOperand(1); + ISD::CondCode CC = cast(Op.getOperand(2))->get(); + SDValue TargetCC = DAG.getConstant(CC, DL, MVT::i32); + + // Check Op SDNode users + // If there are only CALL/CALLW nodes, don't expand Global Address + SDNode &OpNode = *Op.getNode(); + bool Val = false; + for (SDNode::use_iterator UI = OpNode.use_begin(); UI != OpNode.use_end(); + ++UI) { + SDNode &User = *UI.getUse().getUser(); + unsigned OpCode = User.getOpcode(); + if (OpCode == ISD::BRCOND) { + Val = true; + break; + } + } + + // SETCC has BRCOND predecessor, return original operation + if (Val) + return Op; + + // Expand to target SELECT_CC + SDValue TrueV = DAG.getConstant(1, DL, Op.getValueType()); + SDValue FalseV = DAG.getConstant(0, DL, Op.getValueType()); + + if (LHS.getValueType() == MVT::f32) + return DAG.getNode(XtensaISD::SELECT_CC_FP, DL, TrueV.getValueType(), LHS, + RHS, TrueV, FalseV, TargetCC); + else if (TrueV.getValueType() == MVT::f32) + return DAG.getNode(XtensaISD::SELECT_CC_FP, DL, TrueV.getValueType(), LHS, + RHS, TrueV, FalseV, TargetCC); + else + return DAG.getNode(XtensaISD::SELECT_CC, DL, Ty, LHS, RHS, TrueV, FalseV, + TargetCC); +} + +SDValue XtensaTargetLowering::lowerRETURNADDR(SDValue Op, + SelectionDAG &DAG) const { + // check the depth + // TODO: xtensa-gcc can handle this, by navigating through the stack, we + // should be able to do this too + assert((cast(Op.getOperand(0))->getZExtValue() == 0) && + "Return address can be determined only for current frame."); + + MachineFunction &MF = DAG.getMachineFunction(); + MachineFrameInfo &MFI = MF.getFrameInfo(); + MVT VT = Op.getSimpleValueType(); + unsigned RA = Xtensa::A0; + MFI.setReturnAddressIsTaken(true); + + // Return RA, which contains the return address. Mark it an implicit + // live-in. 
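// For example:
//
//   void *ret_addr(void) { return __builtin_return_address(0); }
//
// lowers to a copy from a0 (RA above); any depth other than 0 trips the
// assertion at the top of this function.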
+ unsigned Reg = MF.addLiveIn(RA, getRegClassFor(VT)); + return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), Reg, VT); +} + +SDValue XtensaTargetLowering::lowerImmediate(SDValue Op, + SelectionDAG &DAG) const { + const ConstantSDNode *CN = cast(Op); + SDLoc DL(CN); + APInt apval = CN->getAPIntValue(); + int64_t value = apval.getSExtValue(); + if (Op.getValueType() == MVT::i32) { + if (value > -2048 && value <= 2047) + return Op; + Type *Ty = Type::getInt32Ty(*DAG.getContext()); + Constant *CV = ConstantInt::get(Ty, value); + SDValue CP = DAG.getConstantPool(CV, MVT::i32, 0, 0, false); + return CP; + } else if (Op.getValueType() == MVT::i64) { + // TODO long constants + } + return Op; +} + +SDValue XtensaTargetLowering::lowerImmediateFP(SDValue Op, + SelectionDAG &DAG) const { + const ConstantFPSDNode *CN = cast(Op); + SDLoc DL(CN); + APFloat apval = CN->getValueAPF(); + int64_t value = FloatToBits(CN->getValueAPF().convertToFloat()); + if (Op.getValueType() == MVT::f32) { + Type *Ty = Type::getInt32Ty(*DAG.getContext()); + Constant *CV = ConstantInt::get(Ty, value); + SDValue CP = DAG.getConstantPool(CV, MVT::i32, 0, 0, false); + return DAG.getNode(ISD::BITCAST, DL, MVT::f32, CP); + } else if (Op.getValueType() == MVT::f64) { + // TODO long constants + } + return Op; +} + +#include + +SDValue XtensaTargetLowering::lowerGlobalAddress(SDValue Op, + SelectionDAG &DAG) const { + // Reloc::Model RM = DAG.getTarget().getRelocationModel(); + SDLoc DL(Op); + + if (GlobalAddressSDNode *G = dyn_cast(Op)) { + auto PtrVt = getPointerTy(DAG.getDataLayout()); + const GlobalValue *GV = G->getGlobal(); + + // Check Op SDNode users + // If there are only CALL/CALLW nodes, don't expand Global Address + SDNode &OpNode = *Op.getNode(); + bool Val = false; + for (SDNode::use_iterator UI = OpNode.use_begin(); UI != OpNode.use_end(); + ++UI) { + SDNode &User = *UI.getUse().getUser(); + unsigned OpCode = User.getOpcode(); + if (OpCode != XtensaISD::CALL && OpCode != XtensaISD::CALLW) { + Val = true; + break; + } + } + if (!Val) { + SDValue TargAddr = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVt, + 0, 0 /* TargetFlags */); + return TargAddr; + } + + SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVt, 4); + SDValue CPWrap = getAddrPCRel(CPAddr, DAG); + + return CPWrap; + } + llvm_unreachable("invalid global addresses to lower"); +} + +SDValue XtensaTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *GA, + SelectionDAG &DAG) const { + SDLoc DL(GA); + const GlobalValue *GV = GA->getGlobal(); + EVT PtrVT = getPointerTy(DAG.getDataLayout()); + + if (DAG.getTarget().useEmulatedTLS()) + return LowerToTLSEmulatedModel(GA, DAG); + + TLSModel::Model model = getTargetMachine().getTLSModel(GV); + + if (!Subtarget.hasTHREADPTR()) { + llvm_unreachable("only emulated TLS supported"); + } + + if ((model == TLSModel::LocalExec) || (model == TLSModel::InitialExec)) { + auto PtrVt = getPointerTy(DAG.getDataLayout()); + + bool Priv = GV->isPrivateLinkage(GV->getLinkage()); + // Create a constant pool entry for the callee address + XtensaConstantPoolValue *CPV = XtensaConstantPoolSymbol::Create( + *DAG.getContext(), GV->getName().str().c_str() /* Sym */, + 0 /* XtensaCLabelIndex */, Priv, XtensaCP::TPOFF); + + // Get the address of the callee into a register + SDValue CPAddr = + DAG.getTargetConstantPool(CPV, PtrVt, 4, 0, XtensaII::MO_TPOFF); + SDValue CPWrap = getAddrPCRel(CPAddr, DAG); + + SDValue TPRegister = DAG.getRegister(Xtensa::THREADPTR, MVT::i32); + SDValue ThreadPointer = + DAG.getNode(XtensaISD::RUR, 
DL, MVT::i32, TPRegister); + return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadPointer, CPWrap); + } else + llvm_unreachable("only local-exec and initial-exec TLS mode supported"); + + return SDValue(); +} + +SDValue XtensaTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node, + SelectionDAG &DAG) const { + const BlockAddress *BA = Node->getBlockAddress(); + EVT PtrVT = getPointerTy(DAG.getDataLayout()); + + XtensaConstantPoolValue *CPV = + XtensaConstantPoolConstant::Create(BA, 0, XtensaCP::CPBlockAddress, 0); + SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); + + SDValue CPWrap = getAddrPCRel(CPAddr, DAG); + return CPWrap; +} + +SDValue XtensaTargetLowering::lowerBR_JT(SDValue Op, SelectionDAG &DAG) const { + SDValue Chain = Op.getOperand(0); + SDValue Table = Op.getOperand(1); + SDValue Index = Op.getOperand(2); + SDLoc DL(Op); + JumpTableSDNode *JT = cast(Table); + MachineFunction &MF = DAG.getMachineFunction(); + const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo(); + + SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32); + + const DataLayout &TD = DAG.getDataLayout(); + EVT PTy = getPointerTy(TD); + + unsigned EntrySize = MJTI->getEntrySize(TD); + + Index = DAG.getNode(ISD::MUL, DL, Index.getValueType(), Index, + DAG.getConstant(EntrySize, DL, Index.getValueType())); + SDValue Addr = DAG.getNode(ISD::ADD, DL, Index.getValueType(), Index, Table); + + EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8); + SDValue LD = DAG.getExtLoad(ISD::SEXTLOAD, DL, PTy, Chain, Addr, + MachinePointerInfo::getJumpTable(MF), MemVT); + Addr = LD; + + return DAG.getNode(XtensaISD::BR_JT, DL, MVT::Other, LD.getValue(1), Addr, + TargetJT); +} + +SDValue XtensaTargetLowering::lowerJumpTable(JumpTableSDNode *JT, + SelectionDAG &DAG) const { + SDLoc DL(JT); + EVT PtrVt = getPointerTy(DAG.getDataLayout()); + + // Create a constant pool entry for the callee address + XtensaConstantPoolValue *CPV = + XtensaConstantPoolJumpTable::Create(*DAG.getContext(), JT->getIndex()); + + // Get the address of the callee into a register + SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4); + SDValue CPWrap = getAddrPCRel(CPAddr, DAG); + + return CPWrap; +} + +SDValue XtensaTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP, + SelectionDAG &DAG) const { + EVT PtrVT = getPointerTy(DAG.getDataLayout()); + + SDValue Result; + if (CP->isMachineConstantPoolEntry()) + Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, + CP->getAlignment()); + else + Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, + CP->getAlignment(), CP->getOffset()); + + return getAddrPCRel(Result, DAG); +} + +SDValue XtensaTargetLowering::lowerVASTART(SDValue Op, + SelectionDAG &DAG) const { + MachineFunction &MF = DAG.getMachineFunction(); + XtensaFunctionInfo *XtensaFI = MF.getInfo(); + EVT PtrVT = getPointerTy(DAG.getDataLayout()); + SDLoc DL(Op); + + SDValue Chain = Op.getOperand(0); + SDValue Addr = Op.getOperand(1); + + // typedef struct __va_list_tag { + // int32_t *__va_stk; /* Initialized to point to the position of the + // * first argument in memory offset to account for + // the + // * arguments passed in registers and to account for + // * the size of the argument registers not being + // 16-byte + // * aligned. 
E.G., there are 6 argument registers + // * of 4 bytes each, but we want the __va_ndx for the + // * first stack argument to have the maximal + // * alignment of 16 bytes, so we offset the __va_stk + // address by + // * 32 bytes so that __va_stk[32] references the + // first + // * argument on the stack. + // */ + // int32_t *__va_reg; /* Points to a stack-allocated region holding the + // * contents + // * of the incoming argument registers + // */ + // int32_t __va_ndx; /* Index initialized to the position of the first + // * unnamed (variable) argument. This same index is + // also + // * used to address the arguments passed in memory. + // */ + // } __va_list_tag[1]; + + SDValue ArgAR = + DAG.getConstant(XtensaFI->getVarArgsFirstGPR() * 4 - 8, DL, MVT::i32); + SDValue StackOffsetFI = + DAG.getFrameIndex(XtensaFI->getVarArgsStackOffset(), PtrVT); + + SDValue FR = DAG.getFrameIndex(XtensaFI->getVarArgsFrameIndex(), PtrVT); + + uint64_t FrameOffset = PtrVT.getSizeInBits() / 8; + SDValue ConstFrameOffset1 = DAG.getConstant(FrameOffset, DL, PtrVT); + SDValue ConstFrameOffset2 = DAG.getConstant(FrameOffset * 2, DL, PtrVT); + + const Value *SV = cast(Op.getOperand(2))->getValue(); + + // Store first word : arguments given in stack (__va_stk) + // Advance Argument Overflow pointer down, lest it will point to start + // after register argument va_arg finished + SDValue OverflowPtrAdvance = DAG.getConstant(32, DL, PtrVT); + SDValue StackOffsetFICorr = + DAG.getNode(ISD::SUB, DL, PtrVT, StackOffsetFI, OverflowPtrAdvance); + SDValue firstStore = + DAG.getStore(Chain, DL, StackOffsetFICorr, Addr, MachinePointerInfo(SV)); + + uint64_t nextOffset = FrameOffset; + SDValue nextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, Addr, ConstFrameOffset1); + + // Store second word : arguments given on registers (__va_reg) + SDValue FRAdvance = + DAG.getConstant(XtensaFI->getVarArgsFirstGPR() * 4 - 8, DL, PtrVT); + SDValue FRDecr = DAG.getNode(ISD::SUB, DL, PtrVT, FR, FRAdvance); + SDValue secondStore = DAG.getStore(firstStore, DL, FRDecr, nextPtr, + MachinePointerInfo(SV, nextOffset)); + nextOffset += FrameOffset; + nextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, Addr, ConstFrameOffset2); + + // Store first word : number of int regs (__va_ndx) + return DAG.getStore(secondStore, DL, ArgAR, nextPtr, + MachinePointerInfo(SV, nextOffset)); +} + +SDValue XtensaTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const { + SDNode *Node = Op.getNode(); + EVT VT = Node->getValueType(0); + SDValue InChain = Node->getOperand(0); + SDValue VAListPtr = Node->getOperand(1); + EVT PtrVT = VAListPtr.getValueType(); + const Value *SV = cast(Node->getOperand(2))->getValue(); + SDLoc DL(Node); + int Offset = Node->getConstantOperandVal(3); + + SDValue ARAreaPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAListPtr, + DAG.getConstant(8, DL, MVT::i32)); + SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAListPtr, + DAG.getConstant(4, DL, MVT::i32)); + SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAListPtr, + DAG.getConstant(0, DL, MVT::i32)); + + // areas + SDValue ARIndex = + DAG.getLoad(MVT::i32, DL, InChain, ARAreaPtr, MachinePointerInfo()); + InChain = ARIndex.getValue(1); + + SDValue OverflowArea = + DAG.getLoad(MVT::i32, DL, InChain, OverflowAreaPtr, MachinePointerInfo()); + InChain = OverflowArea.getValue(1); + + SDValue RegSaveArea = + DAG.getLoad(MVT::i32, DL, InChain, RegSaveAreaPtr, MachinePointerInfo()); + InChain = RegSaveArea.getValue(1); + + // We must align Argument register number to even for 64-bit 
arguments + if (VT == MVT::i64 || Offset == 8) { + SDValue Const4 = DAG.getConstant(4, DL, MVT::i32); + SDValue IndexIncr = DAG.getNode(ISD::ADD, DL, MVT::i32, ARIndex, Const4); + + SDValue ConstN7 = DAG.getConstant(~7, DL, MVT::i32); + SDValue IndexMasked = + DAG.getNode(ISD::AND, DL, MVT::i32, IndexIncr, ConstN7); + + InChain = DAG.getStore(InChain, DL, IndexMasked, ARAreaPtr, + MachinePointerInfo(SV)); + ARIndex = IndexMasked; + } + + int LastArgIdx = 4 * 6; // 6 - index of + // Xtensa::a7, last argument register + 1 + SDValue CC = + DAG.getSetCC(DL, MVT::i32, ARIndex, + DAG.getConstant(LastArgIdx, DL, MVT::i32), ISD::SETLT); + + // OurReg = RegSaveArea + ARIndex + SDValue OurReg = DAG.getNode(ISD::ADD, DL, PtrVT, RegSaveArea, ARIndex); + // OurOverflow = OverflowArea + ARIndex + SDValue ARIndexCorrect = DAG.getNode( + ISD::ADD, DL, PtrVT, DAG.getConstant(8, DL, MVT::i32), ARIndex); + SDValue OurOverflow = + DAG.getNode(ISD::ADD, DL, PtrVT, OverflowArea, ARIndexCorrect); + + // determine if we should load from Register save area or Overflow area + SDValue Result = DAG.getNode(ISD::SELECT, DL, PtrVT, CC, OurReg, OurOverflow); + + // increase AR Index by 4 (or 8 if VT is i64) + SDValue IndexPlus1 = + DAG.getNode(ISD::ADD, DL, MVT::i32, ARIndex, + DAG.getConstant(VT == MVT::i64 ? 8 : 4, DL, MVT::i32)); + + InChain = DAG.getStore(InChain, DL, IndexPlus1, ARAreaPtr, + MachinePointerInfo(/*SV*/)); + + return DAG.getLoad(VT, DL, InChain, Result, MachinePointerInfo()); +} + +SDValue XtensaTargetLowering::lowerVACOPY(SDValue Op, SelectionDAG &DAG) const { + // We have to copy the entire va_list struct: + // 2*sizeof(int*) + sizeof(int) = 12 Byte + unsigned VAListSize = 12; + return DAG.getMemcpy(Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2), + DAG.getConstant(VAListSize, SDLoc(Op), MVT::i32), 8, + false, true, false, MachinePointerInfo(), + MachinePointerInfo()); +} + +SDValue XtensaTargetLowering::lowerATOMIC_FENCE(SDValue Op, + SelectionDAG &DAG) const { + SDLoc DL(Op); + SDValue Chain = Op.getOperand(0); + return DAG.getNode(XtensaISD::MEMW, DL, MVT::Other, Chain); +} + +SDValue XtensaTargetLowering::lowerSTACKSAVE(SDValue Op, + SelectionDAG &DAG) const { + unsigned sp = Xtensa::SP; + return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op), sp, Op.getValueType()); +} + +SDValue XtensaTargetLowering::lowerSTACKRESTORE(SDValue Op, + SelectionDAG &DAG) const { + unsigned sp = Xtensa::SP; + if (Subtarget.isWinABI()) { + SDValue NewSP = + DAG.getNode(XtensaISD::MOVSP, SDLoc(Op), MVT::i32, Op.getOperand(1)); + return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op), sp, NewSP); + } else { + return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op), sp, Op.getOperand(1)); + } +} + +SDValue XtensaTargetLowering::lowerFRAMEADDR(SDValue Op, + SelectionDAG &DAG) const { + // check the depth + assert((cast(Op.getOperand(0))->getZExtValue() == 0) && + "Frame address can only be determined for current frame."); + + MachineFunction &MF = DAG.getMachineFunction(); + MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); + MFI.setFrameAddressIsTaken(true); + EVT VT = Op.getValueType(); + SDLoc DL(Op); + + unsigned FrameReg = Subtarget.getRegisterInfo()->getFrameRegister(MF); + SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT); + return FrameAddr; +} + +SDValue XtensaTargetLowering::lowerDYNAMIC_STACKALLOC(SDValue Op, + SelectionDAG &DAG) const { + SDValue Chain = Op.getOperand(0); // Legalize the chain. + SDValue Size = Op.getOperand(1); // Legalize the size. 
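// The requested size is rounded up to a multiple of 32 below, roughly:
//
//   RoundedSize = (Size + 31) & ~31u;  // e.g. alloca(40) reserves 64 bytes
//   SP -= RoundedSize;                 // via MOVSP under the windowed ABI
//
// (a sketch of the ADD/AND/SUB sequence that follows, not additional code).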
+ EVT VT = Size->getValueType(0); + SDLoc DL(Op); + + // Round up Size to 32 + SDValue Size1 = + DAG.getNode(ISD::ADD, DL, VT, Size, DAG.getConstant(31, DL, MVT::i32)); + SDValue SizeRoundUp = + DAG.getNode(ISD::AND, DL, VT, Size1, DAG.getConstant(~31, DL, MVT::i32)); + + unsigned SPReg = Xtensa::SP; + SDValue SP = DAG.getCopyFromReg(Chain, DL, SPReg, VT); + SDValue NewSP = DAG.getNode(ISD::SUB, DL, VT, SP, SizeRoundUp); // Value + if (Subtarget.isWinABI()) { + SDValue NewSP1 = DAG.getNode(XtensaISD::MOVSP, DL, MVT::i32, NewSP); + Chain = DAG.getCopyToReg(SP.getValue(1), DL, SPReg, NewSP1); // Output chain + } else { + Chain = DAG.getCopyToReg(SP.getValue(1), DL, SPReg, NewSP); // Output chain + } + + SDValue NewVal = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i32); + Chain = NewVal.getValue(1); + + SDValue Ops[2] = {NewVal, Chain}; + return DAG.getMergeValues(Ops, DL); +} + +SDValue XtensaTargetLowering::lowerShiftLeftParts(SDValue Op, + SelectionDAG &DAG) const { + SDLoc DL(Op); + MVT VT = MVT::i32; + + SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1); + SDValue Shamt = Op.getOperand(2); + + SDValue SetShiftLeft = DAG.getNode(XtensaISD::SSL, DL, MVT::Glue, Shamt); + SDValue ShiftLeftHi = + DAG.getNode(XtensaISD::SRC, DL, VT, Hi, Lo, SetShiftLeft); + SDValue SetShiftLeft1 = DAG.getNode(XtensaISD::SSL, DL, MVT::Glue, Shamt); + SDValue ShiftLeftLo = DAG.getNode(XtensaISD::SHL, DL, VT, Lo, SetShiftLeft1); + SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt, + DAG.getConstant(VT.getSizeInBits(), DL, MVT::i32)); + Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond, DAG.getConstant(0, DL, VT), + ShiftLeftLo); + Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftLeftLo, ShiftLeftHi); + + SDValue Ops[2] = {Lo, Hi}; + return DAG.getMergeValues(Ops, DL); +} + +SDValue XtensaTargetLowering::lowerShiftRightParts(SDValue Op, + SelectionDAG &DAG, + bool IsSRA) const { + SDLoc DL(Op); + SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1); + SDValue Shamt = Op.getOperand(2); + MVT VT = MVT::i32; + + if (IsSRA) { + SDValue SetShiftRight1 = DAG.getNode(XtensaISD::SSR, DL, MVT::Glue, Shamt); + SDValue ShiftRightLo1 = + DAG.getNode(XtensaISD::SRC, DL, VT, Hi, Lo, SetShiftRight1); + + SDValue SetShiftRight2 = DAG.getNode(XtensaISD::SSR, DL, MVT::Glue, Shamt); + SDValue ShiftRightHi1 = + DAG.getNode(XtensaISD::SRA, DL, VT, Hi, SetShiftRight2); + + SDValue SetShiftRight3 = DAG.getNode(XtensaISD::SSR, DL, MVT::Glue, Shamt); + SDValue ShiftRightLo2 = + DAG.getNode(XtensaISD::SRA, DL, VT, Hi, SetShiftRight3); + + SDValue ShiftRightHi2 = + DAG.getNode(ISD::SRA, DL, VT, Hi, DAG.getConstant(31, DL, VT)); + + SDValue Cond = + DAG.getNode(ISD::AND, DL, MVT::i32, Shamt, + DAG.getConstant(VT.getSizeInBits(), DL, MVT::i32)); + Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftRightHi2, ShiftRightHi1); + Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftRightLo2, ShiftRightLo1); + } else { + SDValue SetShiftRight1 = DAG.getNode(XtensaISD::SSR, DL, MVT::Glue, Shamt); + SDValue ShiftRightLo1 = + DAG.getNode(XtensaISD::SRC, DL, VT, Hi, Lo, SetShiftRight1); + + SDValue SetShiftRight2 = DAG.getNode(XtensaISD::SSR, DL, MVT::Glue, Shamt); + SDValue ShiftRightHi1 = + DAG.getNode(XtensaISD::SRL, DL, VT, Hi, SetShiftRight2); + + SDValue SetShiftRight3 = DAG.getNode(XtensaISD::SSR, DL, MVT::Glue, Shamt); + SDValue ShiftRightLo2 = + DAG.getNode(XtensaISD::SRL, DL, VT, Hi, SetShiftRight3); + + SDValue Cond = + DAG.getNode(ISD::AND, DL, MVT::i32, Shamt, + DAG.getConstant(VT.getSizeInBits(), DL, MVT::i32)); + Hi = DAG.getNode(ISD::SELECT, 
DL, VT, Cond, DAG.getConstant(0, DL, VT), + ShiftRightHi1); + Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftRightLo2, ShiftRightLo1); + } + + SDValue Ops[2] = {Lo, Hi}; + return DAG.getMergeValues(Ops, DL); +} + +SDValue XtensaTargetLowering::LowerOperation(SDValue Op, + SelectionDAG &DAG) const { + switch (Op.getOpcode()) { + case ISD::BR_JT: + return lowerBR_JT(Op, DAG); + case ISD::Constant: + return lowerImmediate(Op, DAG); + case ISD::ConstantFP: + return lowerImmediateFP(Op, DAG); + case ISD::RETURNADDR: + return lowerRETURNADDR(Op, DAG); + case ISD::BR_CC: + return lowerBR_CC(Op, DAG); + case ISD::SETCC: + return lowerSETCC(Op, DAG); + case ISD::SELECT_CC: + return lowerSELECT_CC(Op, DAG); + case ISD::GlobalAddress: + return lowerGlobalAddress(Op, DAG); + case ISD::GlobalTLSAddress: + return lowerGlobalTLSAddress(cast(Op), DAG); + case ISD::BlockAddress: + return lowerBlockAddress(cast(Op), DAG); + case ISD::JumpTable: + return lowerJumpTable(cast(Op), DAG); + case ISD::ConstantPool: + return lowerConstantPool(cast(Op), DAG); + case ISD::VASTART: + return lowerVASTART(Op, DAG); + case ISD::VAARG: + return lowerVAARG(Op, DAG); + case ISD::VACOPY: + return lowerVACOPY(Op, DAG); + case ISD::ATOMIC_FENCE: + return lowerATOMIC_FENCE(Op, DAG); + case ISD::STACKSAVE: + return lowerSTACKSAVE(Op, DAG); + case ISD::STACKRESTORE: + return lowerSTACKRESTORE(Op, DAG); + case ISD::FRAMEADDR: + return lowerFRAMEADDR(Op, DAG); + case ISD::DYNAMIC_STACKALLOC: + return lowerDYNAMIC_STACKALLOC(Op, DAG); + case ISD::SHL_PARTS: + return lowerShiftLeftParts(Op, DAG); + case ISD::SRA_PARTS: + return lowerShiftRightParts(Op, DAG, true); + case ISD::SRL_PARTS: + return lowerShiftRightParts(Op, DAG, false); + default: + llvm_unreachable("Unexpected node to lower"); + } +} + +const char *XtensaTargetLowering::getTargetNodeName(unsigned Opcode) const { +#define OPCODE(NAME) \ + case XtensaISD::NAME: \ + return "XtensaISD::" #NAME + switch (Opcode) { + OPCODE(RET_FLAG); + OPCODE(RETW_FLAG); + OPCODE(CALL); + OPCODE(CALLW); + OPCODE(PCREL_WRAPPER); + OPCODE(SELECT); + OPCODE(SELECT_CC); + OPCODE(SELECT_CC_FP); + OPCODE(BR_CC_T); + OPCODE(BR_CC_F); + OPCODE(BR_JT); + OPCODE(CMPUO); + OPCODE(CMPUEQ); + OPCODE(CMPULE); + OPCODE(CMPULT); + OPCODE(CMPOEQ); + OPCODE(CMPOLE); + OPCODE(CMPOLT); + OPCODE(MOVT); + OPCODE(MOVF); + OPCODE(MADD); + OPCODE(MSUB); + OPCODE(MOVS); + OPCODE(MOVSP); + OPCODE(SHL); + OPCODE(SRA); + OPCODE(SRL); + OPCODE(SRC); + OPCODE(SSL); + OPCODE(SSR); + OPCODE(MEMW); + OPCODE(S32C1I); + OPCODE(WSR); + OPCODE(RUR); + } + return NULL; +#undef OPCODE +} + +//===----------------------------------------------------------------------===// +// Custom insertion +//===----------------------------------------------------------------------===// + +// Call pseduo ops for ABI compliant calls (output is always ra) +MachineBasicBlock *XtensaTargetLowering::emitCALL(MachineInstr *MI, + MachineBasicBlock *BB) const { + // TODO + return BB; +} + +static int GetBranchKind(int Cond, bool &BrInv) { + switch (Cond) { + case ISD::SETEQ: + case ISD::SETOEQ: + case ISD::SETUEQ: + return Xtensa::BEQ; + case ISD::SETNE: + case ISD::SETONE: + case ISD::SETUNE: + return Xtensa::BNE; + case ISD::SETLT: + case ISD::SETOLT: + return Xtensa::BLT; + case ISD::SETLE: + case ISD::SETOLE: + BrInv = true; + return Xtensa::BGE; + case ISD::SETGT: + case ISD::SETOGT: + BrInv = true; + return Xtensa::BLT; + case ISD::SETGE: + case ISD::SETOGE: + return Xtensa::BGE; + case ISD::SETULT: + return Xtensa::BLTU; + case ISD::SETULE: + 
BrInv = true; + return Xtensa::BGEU; + case ISD::SETUGT: + BrInv = true; + return Xtensa::BLTU; + case ISD::SETUGE: + return Xtensa::BGEU; + default: + return -1; + } +} + +static void GetFPBranchKind(int Cond, int &BrKind, int &CmpKind) { + + switch (Cond) { + default: + llvm_unreachable("Invalid condition!"); + break; + case ISD::SETUNE: + BrKind = Xtensa::BF; + CmpKind = Xtensa::OEQ_S; + break; + case ISD::SETUO: + BrKind = Xtensa::BT; + CmpKind = Xtensa::UN_S; + break; + case ISD::SETO: + BrKind = Xtensa::BF; + CmpKind = Xtensa::UN_S; + break; + case ISD::SETUEQ: + BrKind = Xtensa::BT; + CmpKind = Xtensa::UEQ_S; + break; + case ISD::SETULE: + BrKind = Xtensa::BT; + CmpKind = Xtensa::ULE_S; + break; + case ISD::SETULT: + BrKind = Xtensa::BT; + CmpKind = Xtensa::ULT_S; + break; + case ISD::SETEQ: + case ISD::SETOEQ: + BrKind = Xtensa::BT; + CmpKind = Xtensa::OEQ_S; + break; + case ISD::SETNE: + BrKind = Xtensa::BF; + CmpKind = Xtensa::OEQ_S; + break; + case ISD::SETLE: + case ISD::SETOLE: + BrKind = Xtensa::BT; + CmpKind = Xtensa::OLE_S; + break; + case ISD::SETLT: + case ISD::SETOLT: + BrKind = Xtensa::BT; + CmpKind = Xtensa::OLT_S; + break; + case ISD::SETGE: + BrKind = Xtensa::BF; + CmpKind = Xtensa::OLT_S; + break; + case ISD::SETGT: + BrKind = Xtensa::BF; + CmpKind = Xtensa::OLE_S; + break; + } +} + +MachineBasicBlock * +XtensaTargetLowering::emitSelectCC(MachineInstr &MI, + MachineBasicBlock *BB) const { + const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); + DebugLoc DL = MI.getDebugLoc(); + + MachineOperand &LHS = MI.getOperand(1); + MachineOperand &RHS = MI.getOperand(2); + MachineOperand &TrueV = MI.getOperand(3); + MachineOperand &FalseV = MI.getOperand(4); + MachineOperand &Cond = MI.getOperand(5); + + // To "insert" a SELECT_CC instruction, we actually have to insert the + // diamond control-flow pattern. The incoming instruction knows the + // destination vreg to set, the condition code register to branch on, the + // true/false values to select between, and a branch opcode to use. + const BasicBlock *LLVM_BB = BB->getBasicBlock(); + MachineFunction::iterator It = ++BB->getIterator(); + + // thisMBB: + // ... + // TrueVal = ... + // cmpTY ccX, r1, r2 + // bCC copy1MBB + // fallthrough --> copy0MBB + MachineBasicBlock *thisMBB = BB; + MachineFunction *F = BB->getParent(); + MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); + + F->insert(It, copy0MBB); + F->insert(It, sinkMBB); + + // Transfer the remainder of BB and its successor edges to sinkMBB. + sinkMBB->splice(sinkMBB->begin(), BB, + std::next(MachineBasicBlock::iterator(MI)), BB->end()); + sinkMBB->transferSuccessorsAndUpdatePHIs(BB); + + // Next, add the true and fallthrough blocks as its successors. 
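// A typical source pattern that reaches this expansion (a sketch):
//
//   float fmax1(float a, float b) { return a > b ? a : b; }
//
// The FP compare plus BT/BF branch below implements the diamond described
// above; for integer conditions GetBranchKind may set BrInv, in which case
// the branch is emitted with swapped operands (e.g. a <= b becomes BGE b, a).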
+ BB->addSuccessor(copy0MBB); + BB->addSuccessor(sinkMBB); + + if ((MI.getOpcode() == Xtensa::SELECT_CC_FP_FP) || + (MI.getOpcode() == Xtensa::SELECT_CC_FP_INT)) { + int BrKind = 0; + int CmpKind = 0; + MachineFunction *MF = BB->getParent(); + MachineRegisterInfo &RegInfo = MF->getRegInfo(); + const TargetRegisterClass *RC = getRegClassFor(MVT::i1); + unsigned b = RegInfo.createVirtualRegister(RC); + GetFPBranchKind(Cond.getImm(), BrKind, CmpKind); + BuildMI(BB, DL, TII.get(CmpKind), b) + .addReg(LHS.getReg()) + .addReg(RHS.getReg()); + BuildMI(BB, DL, TII.get(BrKind)).addReg(b).addMBB(sinkMBB); + } else { + /*int BrKind = GetBranchKind(Cond.getImm()); + BuildMI(BB, DL, TII.get(BrKind)) + .addReg(LHS.getReg()) + .addReg(RHS.getReg()) + .addMBB(sinkMBB);*/ + bool BrInv = false; + int BrKind = GetBranchKind(Cond.getImm(), BrInv); + if (BrInv) { + BuildMI(BB, DL, TII.get(BrKind)) + .addReg(RHS.getReg()) + .addReg(LHS.getReg()) + .addMBB(sinkMBB); + } else { + BuildMI(BB, DL, TII.get(BrKind)) + .addReg(LHS.getReg()) + .addReg(RHS.getReg()) + .addMBB(sinkMBB); + } + } + + // copy0MBB: + // %FalseValue = ... + // # fallthrough to sinkMBB + BB = copy0MBB; + + // Update machine-CFG edges + BB->addSuccessor(sinkMBB); + + // sinkMBB: + // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] + // ... + BB = sinkMBB; + + BuildMI(*BB, BB->begin(), DL, TII.get(Xtensa::PHI), MI.getOperand(0).getReg()) + .addReg(FalseV.getReg()) + .addMBB(copy0MBB) + .addReg(TrueV.getReg()) + .addMBB(thisMBB); + + MI.eraseFromParent(); // The pseudo instruction is gone now. + return BB; +} + +// Emit instructions for atomic_cmp_swap node for 8/16 bit operands +MachineBasicBlock * +XtensaTargetLowering::emitAtomicCmpSwap(MachineInstr &MI, MachineBasicBlock *BB, + int isByteOperand) const { + const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); + DebugLoc DL = MI.getDebugLoc(); + + const BasicBlock *LLVM_BB = BB->getBasicBlock(); + MachineFunction::iterator It = ++BB->getIterator(); + + MachineBasicBlock *thisBB = BB; + MachineFunction *F = BB->getParent(); + MachineBasicBlock *BBLoop = F->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *BBExit = F->CreateMachineBasicBlock(LLVM_BB); + + F->insert(It, BBLoop); + F->insert(It, BBExit); + + // Transfer the remainder of BB and its successor edges to BBExit. 
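// What this expansion implements, roughly: an 8/16-bit compare-and-swap is
// emulated on the aligned 32-bit word containing the operand, e.g.
//
//   std::atomic<unsigned char> flag;
//   unsigned char expected = 0;
//   flag.compare_exchange_strong(expected, 1);
//
// The byte is isolated with the SSL/SLL mask sequence below, and the loop
// retries S32C1I until the surrounding word is observed unchanged.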
+ BBExit->splice(BBExit->begin(), BB, + std::next(MachineBasicBlock::iterator(MI)), BB->end()); + BBExit->transferSuccessorsAndUpdatePHIs(BB); + + BB->addSuccessor(BBLoop); + + MachineOperand &Res = MI.getOperand(0); + MachineOperand &AtomValAddr = MI.getOperand(1); + MachineOperand &CmpVal = MI.getOperand(2); + MachineOperand &SwpVal = MI.getOperand(3); + + MachineFunction *MF = BB->getParent(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + + unsigned r1 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::MOVI), r1).addImm(3); + + unsigned byte_offs = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::AND), byte_offs) + .addReg(r1) + .addReg(AtomValAddr.getReg()); + + unsigned addr_align = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SUB), addr_align) + .addReg(AtomValAddr.getReg()) + .addReg(byte_offs); + + unsigned bit_offs = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SLLI), bit_offs) + .addReg(byte_offs) + .addImm(3); + + unsigned mask1 = MRI.createVirtualRegister(RC); + if (isByteOperand) { + BuildMI(*BB, MI, DL, TII.get(Xtensa::MOVI), mask1).addImm(0xff); + } else { + unsigned r2 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::MOVI), r2).addImm(1); + unsigned r3 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SLLI), r3).addReg(r2).addImm(16); + BuildMI(*BB, MI, DL, TII.get(Xtensa::ADDI), mask1).addReg(r3).addImm(-1); + } + + BuildMI(*BB, MI, DL, TII.get(Xtensa::SSL)).addReg(bit_offs); + + unsigned r2 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::MOVI), r2).addImm(-1); + + unsigned mask2 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SLL), mask2).addReg(mask1); + + unsigned mask3 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::XOR), mask3).addReg(mask2).addReg(r2); + + unsigned r3 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::L32I), r3).addReg(addr_align).addImm(0); + + unsigned r4 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::AND), r4).addReg(r3).addReg(mask3); + + unsigned cmp1 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SLL), cmp1).addReg(CmpVal.getReg()); + + unsigned swp1 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SLL), swp1).addReg(SwpVal.getReg()); + + BB = BBLoop; + + unsigned maskPhi = MRI.createVirtualRegister(RC); + unsigned maskLoop = MRI.createVirtualRegister(RC); + + BuildMI(*BB, BB->begin(), DL, TII.get(Xtensa::PHI), maskPhi) + .addReg(maskLoop) + .addMBB(BBLoop) + .addReg(r4) + .addMBB(thisBB); + + unsigned cmp2 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::OR), cmp2).addReg(cmp1).addReg(maskPhi); + + unsigned swp2 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::OR), swp2).addReg(swp1).addReg(maskPhi); + + BuildMI(BB, DL, TII.get(Xtensa::WSR), Xtensa::SCOMPARE1).addReg(cmp2); + + unsigned swp3 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::S32C1I), swp3) + .addReg(swp2) + .addReg(addr_align) + .addImm(0); + + BuildMI(BB, DL, TII.get(Xtensa::AND), maskLoop).addReg(swp3).addReg(mask3); + + BuildMI(BB, DL, TII.get(Xtensa::BNE)) + .addReg(maskLoop) + .addReg(maskPhi) + .addMBB(BBLoop); + + BB->addSuccessor(BBLoop); + BB->addSuccessor(BBExit); + + BB = BBExit; + auto st = BBExit->begin(); + + unsigned r5 = MRI.createVirtualRegister(RC); 
+ BuildMI(*BB, st, DL, TII.get(Xtensa::SSR)).addReg(bit_offs); + + BuildMI(*BB, st, DL, TII.get(Xtensa::SRL), r5).addReg(swp3); + + BuildMI(*BB, st, DL, TII.get(Xtensa::AND), Res.getReg()) + .addReg(r5) + .addReg(mask1); + + MI.eraseFromParent(); // The pseudo instruction is gone now. + return BB; +} + +MachineBasicBlock *XtensaTargetLowering::emitAtomicRMW(MachineInstr &MI, + MachineBasicBlock *BB, + unsigned Opcode, + bool inv, + bool minmax) const { + const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); + DebugLoc DL = MI.getDebugLoc(); + + const BasicBlock *LLVM_BB = BB->getBasicBlock(); + MachineFunction::iterator It = ++BB->getIterator(); + + MachineBasicBlock *thisBB = BB; + MachineFunction *F = BB->getParent(); + MachineBasicBlock *BBLoop = F->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *BBExit = F->CreateMachineBasicBlock(LLVM_BB); + + F->insert(It, BBLoop); + F->insert(It, BBExit); + + // Transfer the remainder of BB and its successor edges to BB2. + BBExit->splice(BBExit->begin(), BB, + std::next(MachineBasicBlock::iterator(MI)), BB->end()); + BBExit->transferSuccessorsAndUpdatePHIs(BB); + + BB->addSuccessor(BBLoop); + + MachineOperand &Res = MI.getOperand(0); + MachineOperand &AtomicValAddr = MI.getOperand(1); + MachineOperand &Val = MI.getOperand(2); + MachineFunction *MF = BB->getParent(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + + unsigned r1 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::L32I), r1).add(AtomicValAddr).addImm(0); + + BB = BBLoop; + + unsigned atomicValPhi = MRI.createVirtualRegister(RC); + unsigned atomicValLoop = MRI.createVirtualRegister(RC); + + BuildMI(*BB, BB->begin(), DL, TII.get(Xtensa::PHI), atomicValPhi) + .addReg(atomicValLoop) + .addMBB(BBLoop) + .addReg(r1) + .addMBB(thisBB); + + unsigned r2 = MRI.createVirtualRegister(RC); + + if (minmax) { + MachineBasicBlock *BBLoop1 = F->CreateMachineBasicBlock(LLVM_BB); + F->insert(It, BBLoop1); + BB->addSuccessor(BBLoop1); + MachineBasicBlock *BBLoop2 = F->CreateMachineBasicBlock(LLVM_BB); + F->insert(It, BBLoop2); + BB->addSuccessor(BBLoop2); + + BuildMI(BB, DL, TII.get(Opcode)) + .addReg(atomicValPhi) + .addReg(Val.getReg()) + .addMBB(BBLoop1); + + unsigned r7 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::MOV_N), r7).addReg(Val.getReg()); + + BB = BBLoop1; + unsigned r8 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::MOV_N), r8).addReg(atomicValPhi); + BB->addSuccessor(BBLoop2); + + BB = BBLoop2; + unsigned r9 = MRI.createVirtualRegister(RC); + + BuildMI(*BB, BB->begin(), DL, TII.get(Xtensa::PHI), r9) + .addReg(r7) + .addMBB(BBLoop) + .addReg(r8) + .addMBB(BBLoop1); + BuildMI(BB, DL, TII.get(Xtensa::MOV_N), r2).addReg(r9); + } else { + BuildMI(BB, DL, TII.get(Opcode), r2) + .addReg(atomicValPhi) + .addReg(Val.getReg()); + if (inv) { + unsigned rtmp1 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::MOVI), rtmp1).addImm(-1); + unsigned rtmp2 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::XOR), rtmp2) + .addReg(r2) + .addReg(rtmp1); + r2 = rtmp2; + } + } + + unsigned r4 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::WSR), Xtensa::SCOMPARE1).addReg(atomicValPhi); + BuildMI(BB, DL, TII.get(Xtensa::S32C1I), r4) + .addReg(r2) + .addReg(AtomicValAddr.getReg()) + .addImm(0); + + BuildMI(BB, DL, TII.get(Xtensa::MOV_N), atomicValLoop).addReg(r4); + + BuildMI(BB, DL, TII.get(Xtensa::BNE)) + 
.addReg(atomicValPhi) + .addReg(r4) + .addMBB(BBLoop); + + BB->addSuccessor(BBLoop); + BB->addSuccessor(BBExit); + + BB = BBExit; + auto st = BBExit->begin(); + + BuildMI(*BB, st, DL, TII.get(Xtensa::MOV_N), Res.getReg()).addReg(r4); + + MI.eraseFromParent(); // The pseudo instruction is gone now. + + return BB; +} + +MachineBasicBlock * +XtensaTargetLowering::emitAtomicRMW(MachineInstr &MI, MachineBasicBlock *BB, + bool isByteOperand, unsigned Opcode, + bool inv, bool minmax) const { + const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); + DebugLoc DL = MI.getDebugLoc(); + + const BasicBlock *LLVM_BB = BB->getBasicBlock(); + MachineFunction::iterator It = ++BB->getIterator(); + + MachineBasicBlock *thisBB = BB; + MachineFunction *F = BB->getParent(); + MachineBasicBlock *BBLoop = F->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *BBExit = F->CreateMachineBasicBlock(LLVM_BB); + + F->insert(It, BBLoop); + F->insert(It, BBExit); + + // Transfer the remainder of BB and its successor edges to BB2. + BBExit->splice(BBExit->begin(), BB, + std::next(MachineBasicBlock::iterator(MI)), BB->end()); + BBExit->transferSuccessorsAndUpdatePHIs(BB); + + BB->addSuccessor(BBLoop); + + MachineOperand &Res = MI.getOperand(0); + MachineOperand &AtomValAddr = MI.getOperand(1); + MachineOperand &Val = MI.getOperand(2); + + MachineFunction *MF = BB->getParent(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + + unsigned r1 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::MOVI), r1).addImm(3); + + unsigned byte_offs = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::AND), byte_offs) + .addReg(r1) + .addReg(AtomValAddr.getReg()); + + unsigned addr_align = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SUB), addr_align) + .addReg(AtomValAddr.getReg()) + .addReg(byte_offs); + + unsigned bit_offs = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SLLI), bit_offs) + .addReg(byte_offs) + .addImm(3); + + unsigned mask1 = MRI.createVirtualRegister(RC); + if (isByteOperand) { + BuildMI(*BB, MI, DL, TII.get(Xtensa::MOVI), mask1).addImm(0xff); + } else { + unsigned r2 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::MOVI), r2).addImm(1); + unsigned r3 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SLLI), r3).addReg(r2).addImm(16); + BuildMI(*BB, MI, DL, TII.get(Xtensa::ADDI), mask1).addReg(r3).addImm(-1); + } + + BuildMI(*BB, MI, DL, TII.get(Xtensa::SSL)).addReg(bit_offs); + + unsigned r2 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::MOVI), r2).addImm(-1); + + unsigned mask2 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SLL), mask2).addReg(mask1); + + unsigned mask3 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::XOR), mask3).addReg(mask2).addReg(r2); + + unsigned r3 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::L32I), r3).addReg(addr_align).addImm(0); + + unsigned val1 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SLL), val1).addReg(Val.getReg()); + + BB = BBLoop; + + unsigned atomicValPhi = MRI.createVirtualRegister(RC); + unsigned atomicValLoop = MRI.createVirtualRegister(RC); + + BuildMI(*BB, BB->begin(), DL, TII.get(Xtensa::PHI), atomicValPhi) + .addReg(atomicValLoop) + .addMBB(BBLoop) + .addReg(r3) + .addMBB(thisBB); + + unsigned swp2; + + if (minmax) { + MachineBasicBlock *BBLoop1 = 
F->CreateMachineBasicBlock(LLVM_BB); + F->insert(It, BBLoop1); + BB->addSuccessor(BBLoop1); + MachineBasicBlock *BBLoop2 = F->CreateMachineBasicBlock(LLVM_BB); + F->insert(It, BBLoop2); + BB->addSuccessor(BBLoop2); + + unsigned r1 = MRI.createVirtualRegister(RC); + unsigned r2 = MRI.createVirtualRegister(RC); + unsigned r3 = MRI.createVirtualRegister(RC); + unsigned r4 = MRI.createVirtualRegister(RC); + + unsigned r5 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::AND), r5) + .addReg(atomicValPhi) + .addReg(mask2); + + BuildMI(BB, DL, TII.get(Xtensa::SRL), r1).addReg(r5); + BuildMI(BB, DL, TII.get(Xtensa::SRL), r2).addReg(val1); + + if ((Opcode == Xtensa::BLT) || (Opcode == Xtensa::BGE)) { + if (isByteOperand) { + BuildMI(BB, DL, TII.get(Xtensa::SEXT), r3).addReg(r1).addImm(7); + BuildMI(BB, DL, TII.get(Xtensa::SEXT), r4).addReg(r2).addImm(7); + } else { + BuildMI(BB, DL, TII.get(Xtensa::SEXT), r3).addReg(r1).addImm(15); + BuildMI(BB, DL, TII.get(Xtensa::SEXT), r4).addReg(r2).addImm(15); + } + } else { + r3 = r1; + r4 = r2; + } + + BuildMI(BB, DL, TII.get(Opcode)).addReg(r3).addReg(r4).addMBB(BBLoop1); + + unsigned r7 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::MOV_N), r7).addReg(val1); + + BB = BBLoop1; + unsigned r8 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::MOV_N), r8).addReg(atomicValPhi); + BB->addSuccessor(BBLoop2); + + BB = BBLoop2; + unsigned r9 = MRI.createVirtualRegister(RC); + + BuildMI(*BB, BB->begin(), DL, TII.get(Xtensa::PHI), r9) + .addReg(r7) + .addMBB(BBLoop) + .addReg(r8) + .addMBB(BBLoop1); + + unsigned r10 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::AND), r10) + .addReg(atomicValPhi) + .addReg(mask3); + + unsigned r11 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::AND), r11).addReg(r9).addReg(mask2); + + swp2 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::OR), swp2).addReg(r10).addReg(r11); + } else { + unsigned r4 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::AND), r4) + .addReg(atomicValPhi) + .addReg(mask2); + + unsigned res1 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Opcode), res1).addReg(r4).addReg(val1); + + unsigned swp1 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::AND), swp1).addReg(res1).addReg(mask2); + + unsigned r5 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::AND), r5) + .addReg(atomicValPhi) + .addReg(mask3); + + if (inv) { + unsigned rtmp1 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::XOR), rtmp1) + .addReg(atomicValPhi) + .addReg(mask2); + r5 = rtmp1; + } + + swp2 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::OR), swp2).addReg(swp1).addReg(r5); + } + + unsigned swp3 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::WSR), Xtensa::SCOMPARE1).addReg(atomicValPhi); + BuildMI(BB, DL, TII.get(Xtensa::S32C1I), swp3) + .addReg(swp2) + .addReg(addr_align) + .addImm(0); + + BuildMI(BB, DL, TII.get(Xtensa::MOV_N), atomicValLoop).addReg(swp3); + + BuildMI(BB, DL, TII.get(Xtensa::BNE)) + .addReg(swp3) + .addReg(atomicValPhi) + .addMBB(BBLoop); + + BB->addSuccessor(BBLoop); + BB->addSuccessor(BBExit); + BB = BBExit; + auto st = BBExit->begin(); + + unsigned r6 = MRI.createVirtualRegister(RC); + + BuildMI(*BB, st, DL, TII.get(Xtensa::SSR)).addReg(bit_offs); + + BuildMI(*BB, st, DL, TII.get(Xtensa::SRL), r6).addReg(atomicValLoop); + + BuildMI(*BB, st, DL, TII.get(Xtensa::AND), Res.getReg()) + .addReg(r6) + 
.addReg(mask1); + + MI.eraseFromParent(); // The pseudo instruction is gone now. + + return BB; +} + +MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( + MachineInstr &MI, MachineBasicBlock *MBB) const { + const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); + MachineFunction *MF = MBB->getParent(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + DebugLoc DL = MI.getDebugLoc(); + + switch (MI.getOpcode()) { + case Xtensa::SELECT_CC_FP_FP: + case Xtensa::SELECT_CC_FP_INT: + case Xtensa::SELECT_CC_INT_FP: + case Xtensa::SELECT: + return emitSelectCC(MI, MBB); + + case Xtensa::SLL_P: { + MachineOperand &R = MI.getOperand(0); + MachineOperand &S = MI.getOperand(1); + MachineOperand &SA = MI.getOperand(2); + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::SSL)).addReg(SA.getReg()); + BuildMI(*MBB, MI, DL, TII.get(Xtensa::SLL), R.getReg()).addReg(S.getReg()); + MI.eraseFromParent(); + return MBB; + } + + case Xtensa::SRA_P: { + MachineOperand &R = MI.getOperand(0); + MachineOperand &T = MI.getOperand(1); + MachineOperand &SA = MI.getOperand(2); + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::SSR)).addReg(SA.getReg()); + BuildMI(*MBB, MI, DL, TII.get(Xtensa::SRA), R.getReg()).addReg(T.getReg()); + MI.eraseFromParent(); + return MBB; + } + + case Xtensa::SRL_P: { + MachineOperand &R = MI.getOperand(0); + MachineOperand &T = MI.getOperand(1); + MachineOperand &SA = MI.getOperand(2); + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::SSR)).addReg(SA.getReg()); + BuildMI(*MBB, MI, DL, TII.get(Xtensa::SRL), R.getReg()).addReg(T.getReg()); + MI.eraseFromParent(); + return MBB; + } + + case Xtensa::L8I_P: { + MachineOperand &R = MI.getOperand(0); + MachineOperand &Op1 = MI.getOperand(1); + MachineOperand &Op2 = MI.getOperand(2); + + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned r_new = MRI.createVirtualRegister(RC); + + const MachineMemOperand &MMO = **MI.memoperands_begin(); + if (MMO.isVolatile()) { + BuildMI(*MBB, MI, DL, TII.get(Xtensa::MEMW)); + } + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::L8UI), r_new).add(Op1).add(Op2); + if (Subtarget.hasSEXT()) { + BuildMI(*MBB, MI, DL, TII.get(Xtensa::SEXT), R.getReg()) + .addReg(r_new) + .addImm(7); + } else { + unsigned r_new1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Xtensa::SLLI), r_new1) + .addReg(r_new) + .addImm(24); + BuildMI(*MBB, MI, DL, TII.get(Xtensa::SRAI), R.getReg()) + .addReg(r_new1) + .addImm(24); + } + MI.eraseFromParent(); + return MBB; + } + + case Xtensa::ATOMIC_CMP_SWAP_8_P: { + return emitAtomicCmpSwap(MI, MBB, 1); + } + + case Xtensa::ATOMIC_CMP_SWAP_16_P: { + return emitAtomicCmpSwap(MI, MBB, 0); + } + + case Xtensa::ATOMIC_CMP_SWAP_32_P: { + MachineOperand &R = MI.getOperand(0); + MachineOperand &Addr = MI.getOperand(1); + MachineOperand &Cmp = MI.getOperand(2); + MachineOperand &Swap = MI.getOperand(3); + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::WSR), Xtensa::SCOMPARE1) + .addReg(Cmp.getReg()); + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::S32C1I), R.getReg()) + .addReg(Swap.getReg()) + .addReg(Addr.getReg()) + .addImm(0); + + MI.eraseFromParent(); + return MBB; + } + + case Xtensa::ATOMIC_LOAD_ADD_8_P: + return emitAtomicRMW(MI, MBB, true, Xtensa::ADD, false, false); + case Xtensa::ATOMIC_LOAD_SUB_8_P: + return emitAtomicRMW(MI, MBB, true, Xtensa::SUB, false, false); + case Xtensa::ATOMIC_LOAD_OR_8_P: + return emitAtomicRMW(MI, MBB, true, Xtensa::OR, false, false); + case Xtensa::ATOMIC_LOAD_XOR_8_P: + return emitAtomicRMW(MI, MBB, true, Xtensa::XOR, false, false); + case 
Xtensa::ATOMIC_LOAD_AND_8_P: + return emitAtomicRMW(MI, MBB, true, Xtensa::AND, false, false); + case Xtensa::ATOMIC_LOAD_NAND_8_P: + return emitAtomicRMW(MI, MBB, true, Xtensa::AND, true, false); + case Xtensa::ATOMIC_LOAD_MIN_8_P: + return emitAtomicRMW(MI, MBB, true, Xtensa::BGE, false, true); + case Xtensa::ATOMIC_LOAD_MAX_8_P: + return emitAtomicRMW(MI, MBB, true, Xtensa::BLT, false, true); + case Xtensa::ATOMIC_LOAD_UMIN_8_P: + return emitAtomicRMW(MI, MBB, true, Xtensa::BGEU, false, true); + case Xtensa::ATOMIC_LOAD_UMAX_8_P: + return emitAtomicRMW(MI, MBB, true, Xtensa::BLTU, false, true); + + case Xtensa::ATOMIC_LOAD_ADD_16_P: + return emitAtomicRMW(MI, MBB, false, Xtensa::ADD, false, false); + case Xtensa::ATOMIC_LOAD_SUB_16_P: + return emitAtomicRMW(MI, MBB, false, Xtensa::SUB, false, false); + case Xtensa::ATOMIC_LOAD_OR_16_P: + return emitAtomicRMW(MI, MBB, false, Xtensa::OR, false, false); + case Xtensa::ATOMIC_LOAD_XOR_16_P: + return emitAtomicRMW(MI, MBB, false, Xtensa::XOR, false, false); + case Xtensa::ATOMIC_LOAD_AND_16_P: + return emitAtomicRMW(MI, MBB, false, Xtensa::AND, false, false); + case Xtensa::ATOMIC_LOAD_NAND_16_P: + return emitAtomicRMW(MI, MBB, false, Xtensa::AND, true, false); + case Xtensa::ATOMIC_LOAD_MIN_16_P: + return emitAtomicRMW(MI, MBB, false, Xtensa::BGE, false, true); + case Xtensa::ATOMIC_LOAD_MAX_16_P: + return emitAtomicRMW(MI, MBB, false, Xtensa::BLT, false, true); + case Xtensa::ATOMIC_LOAD_UMIN_16_P: + return emitAtomicRMW(MI, MBB, false, Xtensa::BGEU, false, true); + case Xtensa::ATOMIC_LOAD_UMAX_16_P: + return emitAtomicRMW(MI, MBB, false, Xtensa::BLTU, false, true); + + case Xtensa::ATOMIC_LOAD_ADD_32_P: + return emitAtomicRMW(MI, MBB, Xtensa::ADD, false, false); + case Xtensa::ATOMIC_LOAD_SUB_32_P: + return emitAtomicRMW(MI, MBB, Xtensa::SUB, false, false); + case Xtensa::ATOMIC_LOAD_OR_32_P: + return emitAtomicRMW(MI, MBB, Xtensa::OR, false, false); + case Xtensa::ATOMIC_LOAD_XOR_32_P: + return emitAtomicRMW(MI, MBB, Xtensa::XOR, false, false); + case Xtensa::ATOMIC_LOAD_AND_32_P: + return emitAtomicRMW(MI, MBB, Xtensa::AND, false, false); + case Xtensa::ATOMIC_LOAD_NAND_32_P: + return emitAtomicRMW(MI, MBB, Xtensa::AND, true, false); + case Xtensa::ATOMIC_LOAD_MIN_32_P: + return emitAtomicRMW(MI, MBB, Xtensa::BGE, false, true); + case Xtensa::ATOMIC_LOAD_MAX_32_P: + return emitAtomicRMW(MI, MBB, Xtensa::BLT, false, true); + case Xtensa::ATOMIC_LOAD_UMIN_32_P: + return emitAtomicRMW(MI, MBB, Xtensa::BGEU, false, true); + case Xtensa::ATOMIC_LOAD_UMAX_32_P: + return emitAtomicRMW(MI, MBB, Xtensa::BLTU, false, true); + case Xtensa::S8I: + case Xtensa::S16I: + case Xtensa::S32I: + case Xtensa::S32I_N: + case Xtensa::S32F: + case Xtensa::L8UI: + case Xtensa::L16SI: + case Xtensa::L16UI: + case Xtensa::L32I: + case Xtensa::L32I_N: + case Xtensa::L32F: { + const MachineMemOperand &MMO = **MI.memoperands_begin(); + if (MMO.isVolatile()) { + BuildMI(*MBB, MI, DL, TII.get(Xtensa::MEMW)); + } + return MBB; + } + default: + llvm_unreachable("Unexpected instr type to insert"); + } +} diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h new file mode 100644 index 0000000000000..f6b80345d0f16 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -0,0 +1,240 @@ +//===- XtensaISelLowering.h - Xtensa DAG Lowering Interface ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. 
See LICENSE.TXT for details. +// +//===---------------------------------------------------------------------===// +// +// This file defines the interfaces that Xtensa uses to lower LLVM code into a +// selection DAG. +// +//===---------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_XTENSA_XTENSAISELLOWERING_H +#define LLVM_LIB_TARGET_XTENSA_XTENSAISELLOWERING_H + +#include "llvm/CodeGen/CallingConvLower.h" +#include "llvm/CodeGen/SelectionDAG.h" +#include "llvm/CodeGen/TargetLowering.h" + +namespace llvm { +namespace XtensaISD { +enum { + FIRST_NUMBER = ISD::BUILTIN_OP_END, + + BR_CC_T, + BR_CC_F, + + // Calls a function. Operand 0 is the chain operand and operand 1 + // is the target address. The arguments start at operand 2. + // There is an optional glue operand at the end. + CALL, + // WinABI Call version + CALLW, + + // Floating point unordered compare conditions + CMPUEQ, + CMPULE, + CMPULT, + CMPUO, + // Floating point compare conditions + CMPOEQ, + CMPOLE, + CMPOLT, + // FP multipy-add/sub + MADD, + MSUB, + // FP move + MOVS, + + MOVSP, + + // Wraps a TargetGlobalAddress that should be loaded using PC-relative + // accesses. Operand 0 is the address. + PCREL_WRAPPER, + + // Return with a flag operand. Operand 0 is the chain operand. + RET_FLAG, + // WinABI Return + RETW_FLAG, + + // Selects between operand 0 and operand 1. Operand 2 is the + // mask of condition-code values for which operand 0 should be + // chosen over operand 1; it has the same form as BR_CCMASK. + // Operand 3 is the flag operand. + SELECT, + SELECT_CC, + + SELECT_CC_FP, + BR_JT, + // Predicate MOV + MOVF, + MOVT, + // shift + SHL, + SRA, + SRL, + SRC, + SSL, + SSR, + + MEMW, + S32C1I, + WSR, + RUR +}; +} + +class XtensaSubtarget; + +class XtensaTargetLowering : public TargetLowering { +public: + explicit XtensaTargetLowering(const TargetMachine &TM, + const XtensaSubtarget &STI); + + MVT getScalarShiftAmountTy(const DataLayout &, EVT LHSTy) const override { + return LHSTy.getSizeInBits() <= 32 ? MVT::i32 : MVT::i64; + } + + EVT getSetCCResultType(const DataLayout &, LLVMContext &, + EVT VT) const override { + if (!VT.isVector()) + return MVT::i32; + return VT.changeVectorElementTypeToInteger(); + } + + bool isFMAFasterThanFMulAndFAdd(EVT) const override { return true; } + + /// If a physical register, this returns the register that receives the + /// exception address on entry to an EH pad. + unsigned + getExceptionPointerRegister(const Constant *PersonalityFn) const override; + /// If a physical register, this returns the register that receives the + /// exception typeid on entry to a landing pad. + unsigned + getExceptionSelectorRegister(const Constant *PersonalityFn) const override; + + bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override; + bool isFPImmLegal(const APFloat &Imm, EVT VT, + bool ForCodeSize) const override; + const char *getTargetNodeName(unsigned Opcode) const override; + std::pair + getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, + StringRef Constraint, MVT VT) const override; + TargetLowering::ConstraintType + getConstraintType(StringRef Constraint) const override; + TargetLowering::ConstraintWeight + getSingleConstraintMatchWeight(AsmOperandInfo &info, + const char *constraint) const override; + + /// Returns the size of the platform's va_list object. + unsigned getVaListSizeInBits(const DataLayout &DL) const override; + + /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops + /// vector. 
If it is invalid, don't add anything to Ops. If hasMemory is + /// true it means one of the asm constraint of the inline asm instruction + /// being processed is 'm'. + void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, + std::vector &Ops, + SelectionDAG &DAG) const override; + + SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override; + + MachineBasicBlock * + EmitInstrWithCustomInserter(MachineInstr &MI, + MachineBasicBlock *BB) const override; + SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override; + SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, + bool isVarArg, + const SmallVectorImpl &Ins, + const SDLoc &DL, SelectionDAG &DAG, + SmallVectorImpl &InVals) const override; + SDValue LowerCall(CallLoweringInfo &CLI, + SmallVectorImpl &InVals) const override; + + virtual bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, + bool isVarArg, + const SmallVectorImpl &Outs, + LLVMContext &Context) const override; + + SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, + const SmallVectorImpl &Outs, + const SmallVectorImpl &OutVals, const SDLoc &DL, + SelectionDAG &DAG) const override; + bool shouldInsertFencesForAtomic(const Instruction *I) const override { + return true; + } + struct LTStr { + bool operator()(const char *S1, const char *S2) const { + return strcmp(S1, S2) < 0; + } + }; + + /// ByValArgInfo - Byval argument information. + struct ByValArgInfo { + unsigned FirstIdx; // Index of the first register used. + unsigned NumRegs; // Number of registers used for this argument. + unsigned Address; // Offset of the stack area used to pass this argument. + + ByValArgInfo() : FirstIdx(0), NumRegs(0), Address(0) {} + }; + +private: + const XtensaSubtarget &Subtarget; + + SDValue lowerBR_JT(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerImmediate(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerImmediateFP(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerBR_CC(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerSETCC(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerGlobalTLSAddress(GlobalAddressSDNode *Node, + SelectionDAG &DAG) const; + SDValue lowerBlockAddress(BlockAddressSDNode *Node, SelectionDAG &DAG) const; + SDValue lowerJumpTable(JumpTableSDNode *JT, SelectionDAG &DAG) const; + SDValue lowerConstantPool(ConstantPoolSDNode *CP, SelectionDAG &DAG) const; + SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerVAARG(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerVACOPY(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const; + + SDValue getTargetNode(SDValue Op, SelectionDAG &DAG, unsigned Flag) const; + SDValue getAddrPCRel(SDValue Op, SelectionDAG &DAG) const; + + // Implement EmitInstrWithCustomInserter for individual operation types. 
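+  // Each emit* helper below expands one pseudo instruction into real Xtensa
+  // instructions, possibly creating new basic blocks, and returns the block
+  // where subsequent code should be emitted.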
+  MachineBasicBlock *emitCALL(MachineInstr *MI, MachineBasicBlock *BB) const;
+  MachineBasicBlock *emitSelectCC(MachineInstr &MI,
+                                  MachineBasicBlock *BB) const;
+  MachineBasicBlock *emitAtomicCmpSwap(MachineInstr &MI, MachineBasicBlock *BB,
+                                       int isByteOperand) const;
+  MachineBasicBlock *emitAtomicRMW(MachineInstr &MI, MachineBasicBlock *BB,
+                                   bool isByteOperand, unsigned Opcode,
+                                   bool inv, bool minmax) const;
+  MachineBasicBlock *emitAtomicRMW(MachineInstr &MI, MachineBasicBlock *BB,
+                                   unsigned Opcode, bool inv,
+                                   bool minmax) const;
+
+  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool isVarArg) const;
+
+  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
+    if (ConstraintCode == "R")
+      return InlineAsm::Constraint_R;
+    else if (ConstraintCode == "ZC")
+      return InlineAsm::Constraint_ZC;
+    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
+  }
+};
+
+} // end namespace llvm
+
+#endif /* LLVM_LIB_TARGET_XTENSA_XTENSAISELLOWERING_H */
diff --git a/llvm/lib/Target/Xtensa/XtensaInstrFormats.td b/llvm/lib/Target/Xtensa/XtensaInstrFormats.td
new file mode 100644
index 0000000000000..158b294b5faa4
--- /dev/null
+++ b/llvm/lib/Target/Xtensa/XtensaInstrFormats.td
@@ -0,0 +1,237 @@
+//===- XtensaInstrFormats.td - Xtensa Instruction Formats -------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===---------------------------------------------------------------------------===//
+
+// Base class for Xtensa 16 & 24 bit Formats
+class XtensaInst<int size, dag outs, dag ins, string asmstr, list<dag> pattern,
+                 InstrItinClass itin = NoItinerary>: Instruction
+{
+  let Namespace = "Xtensa";
+
+  let Size = size;
+
+  let OutOperandList = outs;
+  let InOperandList = ins;
+
+  let AsmString = asmstr;
+  let Pattern = pattern;
+  let Itinerary = itin;
+
+}
+
+// Base class for Xtensa 24 bit Format
+class XtensaInst24<dag outs, dag ins, string asmstr, list<dag> pattern,
+                   InstrItinClass itin = NoItinerary>:
+  XtensaInst<3, outs, ins, asmstr, pattern, itin>
+{
+  field bits<24> Inst;
+  field bits<24> SoftFail = 0;
+}
+
+// Base class for Xtensa 16 bit Format
+class XtensaInst16<dag outs, dag ins, string asmstr, list<dag> pattern,
+                   InstrItinClass itin = NoItinerary>:
+  XtensaInst<2, outs, ins, asmstr, pattern, itin>
+{
+  field bits<16> Inst;
+  field bits<16> SoftFail = 0;
+  let Predicates = [HasDensity];
+}
+
+class RRR_Inst<bits<4> op0, bits<4> op1, bits<4> op2, dag outs, dag ins,
+               string asmstr, list<dag> pattern, InstrItinClass itin = NoItinerary>:
+  XtensaInst24<outs, ins, asmstr, pattern, itin>
+{
+  bits<4> r;
+  bits<4> s;
+  bits<4> t;
+  //bits<4> op2;
+
+  let Inst{23-20} = op2;
+  let Inst{19-16} = op1;
+  let Inst{15-12} = r;
+  let Inst{11-8} = s;
+  let Inst{7-4} = t;
+  let Inst{3-0} = op0;
+}
+
+class RRI4_Inst<bits<4> op0, bits<4> op1, dag outs, dag ins,
+                string asmstr, list<dag> pattern, InstrItinClass itin = NoItinerary>:
+  XtensaInst24<outs, ins, asmstr, pattern, itin>
+{
+  bits<4> r;
+  bits<4> s;
+  bits<4> t;
+  bits<4> imm4;
+
+  let Inst{23-20} = imm4;
+  let Inst{19-16} = op1;
+  let Inst{15-12} = r;
+  let Inst{11-8} = s;
+  let Inst{7-4} = t;
+  let Inst{3-0} = op0;
+}
+
+class RRI8_Inst<bits<4> op0, dag outs, dag ins,
+                string asmstr, list<dag> pattern, InstrItinClass itin = NoItinerary>:
+  XtensaInst24<outs, ins, asmstr, pattern, itin>
+{
+  bits<4> r;
+  bits<4> s;
+  bits<4> t;
+  bits<8> imm8;
+
+  let Inst{23-16} = imm8;
+  let Inst{15-12} = r;
+  let Inst{11-8} = s;
+  let Inst{7-4} = t;
+  let Inst{3-0} = op0;
+}
+
+class RI16_Inst<bits<4> op0, dag outs, dag ins,
+                string asmstr, list<dag> pattern, InstrItinClass itin = NoItinerary>:
+  XtensaInst24<outs, ins, asmstr, pattern, itin>
+{
+  bits<4> t;
+  bits<16> imm16;
+
+  let Inst{23-8} = imm16;
+  let Inst{7-4} = t;
+  let Inst{3-0} = op0;
+}
+
+class RSR_Inst<bits<4> op0, bits<4> op1, bits<4> op2, dag outs, dag ins,
+               string asmstr, list<dag> pattern, InstrItinClass itin = NoItinerary>:
+  XtensaInst24<outs, ins, asmstr, pattern, itin>
+{
+  bits<8> sr;
+  bits<4> t;
+
+  let Inst{23-20} = op2;
+  let Inst{19-16} = op1;
+  let Inst{15-8} = sr;
+  let Inst{7-4} = t;
+  let Inst{3-0} = op0;
+}
+
+class CALL_Inst<bits<4> op0, dag outs, dag ins,
+                string asmstr, list<dag> pattern, InstrItinClass itin = NoItinerary>:
+  XtensaInst24<outs, ins, asmstr, pattern, itin>
+{
+  bits<18> offset;
+  bits<2> n;
+
+  let Inst{23-6} = offset;
+  let Inst{5-4} = n;
+  let Inst{3-0} = op0;
+}
+
+class CALLX_Inst<bits<4> op0, bits<4> op1, bits<4> op2, dag outs, dag ins,
+                 string asmstr, list<dag> pattern, InstrItinClass itin = NoItinerary>:
+  XtensaInst24<outs, ins, asmstr, pattern, itin>
+{
+  bits<4> r;
+  bits<4> s;
+  bits<2> m;
+  bits<2> n;
+
+  let Inst{23-20} = op2;
+  let Inst{19-16} = op1;
+  let Inst{15-12} = r;
+  let Inst{11-8} = s;
+  let Inst{7-6} = m;
+  let Inst{5-4} = n;
+  let Inst{3-0} = op0;
+}
+
+class BRI8_Inst<bits<4> op0, dag outs, dag ins,
+                string asmstr, list<dag> pattern, InstrItinClass itin = NoItinerary>:
+  XtensaInst24<outs, ins, asmstr, pattern, itin>
+{
+  bits<8> imm8;
+  bits<4> r;
+  bits<4> s;
+  bits<2> m;
+  bits<2> n;
+
+  let Inst{23-16} = imm8;
+  let Inst{15-12} = r;
+  let Inst{11-8} = s;
+  let Inst{7-6} = m;
+  let Inst{5-4} = n;
+  let Inst{3-0} = op0;
+}
+
+class BRI12_Inst<bits<4> op0, bits<2> n, bits<2> m, dag outs, dag ins,
+                 string asmstr, list<dag> pattern, InstrItinClass itin = NoItinerary>:
+  XtensaInst24<outs, ins, asmstr, pattern, itin>
+{
+  bits<12> imm12;
+  bits<4> s;
+
+
+  let Inst{23-12} = imm12;
+  let Inst{11-8} = s;
+  let Inst{7-6} = m;
+  let Inst{5-4} = n;
+  let Inst{3-0} = op0;
+}
+
+class RRRN_Inst<bits<4> op0, dag outs, dag ins,
+                string asmstr, list<dag> pattern, InstrItinClass itin = NoItinerary>:
+  XtensaInst16<outs, ins, asmstr, pattern, itin>
+{
+  bits<4> r;
+  bits<4> s;
+  bits<4> t;
+
+  let Inst{15-12} = r;
+  let Inst{11-8} = s;
+  let Inst{7-4} = t;
+  let Inst{3-0} = op0;
+}
+
+class RI7_Inst<bits<4> op0, bits<1> i, dag outs, dag ins,
+               string asmstr, list<dag> pattern, InstrItinClass itin = NoItinerary>:
+  XtensaInst16<outs, ins, asmstr, pattern, itin>
+{
+  bits<7> imm7;
+  bits<4> s;
+
+  let Inst{15-12} = imm7{3-0};
+  let Inst{11-8} = s;
+  let Inst{7} = i;
+  let Inst{6-4} = imm7{6-4};
+  let Inst{3-0} = op0;
+}
+
+class RI6_Inst<bits<4> op0, bits<1> i, bits<1> z, dag outs, dag ins,
+               string asmstr, list<dag> pattern, InstrItinClass itin = NoItinerary>:
+  XtensaInst16<outs, ins, asmstr, pattern, itin>
+{
+  bits<6> imm6;
+  bits<4> s;
+
+  let Inst{15-12} = imm6{3-0};
+  let Inst{11-8} = s;
+  let Inst{7} = i;
+  let Inst{6} = z;
+  let Inst{5-4} = imm6{5-4};
+  let Inst{3-0} = op0;
+}
+
+// Pseudo instructions
+class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
+  : XtensaInst<2, outs, ins, asmstr, pattern>
+{
+  field bits<16> Inst;
+  field bits<16> SoftFail = 0;
+  let Inst = 0x0;
+  let isPseudo = 1;
+  let isCodeGenOnly = 1;
+}
diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp
new file mode 100644
index 0000000000000..67eb3f79e288d
--- /dev/null
+++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp
@@ -0,0 +1,716 @@
+//===- XtensaInstrInfo.cpp - Xtensa Instruction Information ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Xtensa implementation of the TargetInstrInfo class.
+// +//===----------------------------------------------------------------------===// + +#include "XtensaInstrInfo.h" +#include "XtensaConstantPoolValue.h" +#include "XtensaTargetMachine.h" +#include "llvm/CodeGen/MachineConstantPool.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/RegisterScavenging.h" + +#define GET_INSTRINFO_CTOR_DTOR +#include "XtensaGenInstrInfo.inc" + +using namespace llvm; + +enum { + // Branch type + UBRANCH = 1, + CBRANCH_RR = 2, + CBRANCH_RI = 3, + CBRANCH_RZ = 4, + CBRANCH_B = 5 +}; + +static inline const MachineInstrBuilder & +addFrameReference(const MachineInstrBuilder &MIB, int FI) { + MachineInstr *MI = MIB; + MachineFunction &MF = *MI->getParent()->getParent(); + MachineFrameInfo &MFFrame = MF.getFrameInfo(); + const MCInstrDesc &MCID = MI->getDesc(); + MachineMemOperand::Flags Flags = MachineMemOperand::MONone; + if (MCID.mayLoad()) + Flags |= MachineMemOperand::MOLoad; + if (MCID.mayStore()) + Flags |= MachineMemOperand::MOStore; + int64_t Offset = 0; + unsigned Align = MFFrame.getObjectAlignment(FI); + + MachineMemOperand *MMO = + MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI, Offset), + Flags, MFFrame.getObjectSize(FI), Align); + return MIB.addFrameIndex(FI).addImm(Offset).addMemOperand(MMO); +} + +XtensaInstrInfo::XtensaInstrInfo(XtensaSubtarget &sti) + : XtensaGenInstrInfo(Xtensa::ADJCALLSTACKDOWN, Xtensa::ADJCALLSTACKUP), + RI(sti), STI(sti) {} + +/// Adjust SP by Amount bytes. +void XtensaInstrInfo::adjustStackPtr(unsigned SP, int64_t Amount, + MachineBasicBlock &MBB, + MachineBasicBlock::iterator I) const { + DebugLoc DL = I != MBB.end() ? I->getDebugLoc() : DebugLoc(); + + if (Amount == 0) + return; + + MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo(); + const TargetRegisterClass *RC = &Xtensa::ARRegClass; + + // create virtual reg to store immediate + unsigned Reg = RegInfo.createVirtualRegister(RC); + + if (isInt<8>(Amount)) // addi sp, sp, amount + BuildMI(MBB, I, DL, get(Xtensa::ADDI), Reg).addReg(SP).addImm(Amount); + else { // Expand immediate that doesn't fit in 12-bit. 
+ unsigned Reg1; + loadImmediate(MBB, I, &Reg1, Amount); + BuildMI(MBB, I, DL, get(Xtensa::ADD), Reg) + .addReg(SP) + .addReg(Reg1, RegState::Kill); + } + + if (STI.isWinABI()) { + BuildMI(MBB, I, DL, get(Xtensa::MOVSP), SP).addReg(Reg, RegState::Kill); + } else { + BuildMI(MBB, I, DL, get(Xtensa::OR), SP) + .addReg(Reg, RegState::Kill) + .addReg(Reg, RegState::Kill); + } +} + +void XtensaInstrInfo::copyPhysReg(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, + const DebugLoc &DL, unsigned DestReg, + unsigned SrcReg, bool KillSrc) const { + unsigned Opcode; + + // when we are copying a phys reg we want the bits for fp + if (Xtensa::ARRegClass.contains(DestReg, SrcReg)) { + if (STI.hasDensity()) + BuildMI(MBB, MBBI, DL, get(Xtensa::MOV_N), DestReg) + .addReg(SrcReg, getKillRegState(KillSrc)); + else + BuildMI(MBB, MBBI, DL, get(Xtensa::OR), DestReg) + .addReg(SrcReg, getKillRegState(KillSrc)) + .addReg(SrcReg, getKillRegState(KillSrc)); + return; + } else if (STI.hasSingleFloat() && Xtensa::FPRRegClass.contains(SrcReg) && + Xtensa::FPRRegClass.contains(DestReg)) + Opcode = Xtensa::MOV_S; + else if (STI.hasSingleFloat() && Xtensa::FPRRegClass.contains(SrcReg) && + Xtensa::ARRegClass.contains(DestReg)) + Opcode = Xtensa::RFR; + else if (STI.hasSingleFloat() && Xtensa::ARRegClass.contains(SrcReg) && + Xtensa::FPRRegClass.contains(DestReg)) + Opcode = Xtensa::WFR; + else + llvm_unreachable("Impossible reg-to-reg copy"); + + BuildMI(MBB, MBBI, DL, get(Opcode), DestReg) + .addReg(SrcReg, getKillRegState(KillSrc)); +} + +void XtensaInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, + unsigned SrcReg, bool isKill, + int FrameIdx, + const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI) const { + DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc(); + unsigned LoadOpcode, StoreOpcode; + getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode, FrameIdx); + addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode)) + .addReg(SrcReg, getKillRegState(isKill)), + FrameIdx); +} + +void XtensaInstrInfo::loadRegFromStackSlot( + MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg, + int FrameIdx, const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI) const { + DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc(); + unsigned LoadOpcode, StoreOpcode; + getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode, FrameIdx); + addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg), FrameIdx); +} + +void XtensaInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC, + unsigned &LoadOpcode, + unsigned &StoreOpcode, + int64_t offset) const { + if (RC == &Xtensa::ARRegClass) { + // TODO: Use L32I_N and S32I_N when it possible + LoadOpcode = Xtensa::L32I; + StoreOpcode = Xtensa::S32I; + } else if (RC == &Xtensa::FPRRegClass) { + LoadOpcode = Xtensa::L32F; + StoreOpcode = Xtensa::S32F; + } else + llvm_unreachable("Unsupported regclass to load or store"); +} + +void XtensaInstrInfo::loadImmediate(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, + unsigned *Reg, int64_t Value) const { + DebugLoc DL = MBBI != MBB.end() ? 
MBBI->getDebugLoc() : DebugLoc(); + MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo(); + const TargetRegisterClass *RC = &Xtensa::ARRegClass; + + // create virtual reg to store immediate + *Reg = RegInfo.createVirtualRegister(RC); + if ((Value >= -32 && Value <= 95) && STI.hasDensity()) { + BuildMI(MBB, MBBI, DL, get(Xtensa::MOVI_N), *Reg).addImm(Value); + } else if (Value >= -2048 && Value <= 2047) { + BuildMI(MBB, MBBI, DL, get(Xtensa::MOVI), *Reg).addImm(Value); + } else if (Value >= -32768 && Value <= 32767) { + int Low = Value & 0xFF; + int High = Value & ~0xFF; + + BuildMI(MBB, MBBI, DL, get(Xtensa::MOVI), *Reg).addImm(Low); + BuildMI(MBB, MBBI, DL, get(Xtensa::ADDMI), *Reg).addReg(*Reg).addImm(High); + } else if (Value >= -4294967296LL && Value <= 4294967295LL) { + // 32 bit arbirary constant + MachineConstantPool *MCP = MBB.getParent()->getConstantPool(); + uint64_t UVal = ((uint64_t)Value) & 0xFFFFFFFFLL; + const Constant *CVal = ConstantInt::get( + Type::getInt32Ty(MBB.getParent()->getFunction().getContext()), UVal, + false); + unsigned Idx = MCP->getConstantPoolIndex(CVal, 2U); + // MCSymbol MSym + BuildMI(MBB, MBBI, DL, get(Xtensa::L32R), *Reg).addConstantPoolIndex(Idx); + } else { + // use L32R to let assembler load immediate best + // TODO replace to L32R + llvm_unreachable("Unsupported load immediate value"); + } +} + +unsigned XtensaInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { + switch (MI.getOpcode()) { + case TargetOpcode::INLINEASM: { // Inline Asm: Variable size. + const MachineFunction *MF = MI.getParent()->getParent(); + const char *AsmStr = MI.getOperand(0).getSymbolName(); + return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo()); + } + default: + return MI.getDesc().getSize(); + } +} + +bool XtensaInstrInfo::reverseBranchCondition( + SmallVectorImpl &Cond) const { + assert(Cond.size() <= 4 && "Invalid branch condition!"); + + switch (Cond[0].getImm()) { + case Xtensa::BEQ: + Cond[0].setImm(Xtensa::BNE); + return false; + case Xtensa::BNE: + Cond[0].setImm(Xtensa::BEQ); + return false; + case Xtensa::BLT: + Cond[0].setImm(Xtensa::BGE); + return false; + case Xtensa::BGE: + Cond[0].setImm(Xtensa::BLT); + return false; + case Xtensa::BLTU: + Cond[0].setImm(Xtensa::BGEU); + return false; + case Xtensa::BGEU: + Cond[0].setImm(Xtensa::BLTU); + return false; + + case Xtensa::BEQI: + Cond[0].setImm(Xtensa::BNEI); + return false; + case Xtensa::BNEI: + Cond[0].setImm(Xtensa::BEQI); + return false; + case Xtensa::BGEI: + Cond[0].setImm(Xtensa::BLTI); + return false; + case Xtensa::BLTI: + Cond[0].setImm(Xtensa::BGEI); + return false; + case Xtensa::BGEUI: + Cond[0].setImm(Xtensa::BLTUI); + return false; + case Xtensa::BLTUI: + Cond[0].setImm(Xtensa::BGEUI); + return false; + + case Xtensa::BEQZ: + Cond[0].setImm(Xtensa::BNEZ); + return false; + case Xtensa::BNEZ: + Cond[0].setImm(Xtensa::BEQZ); + return false; + case Xtensa::BLTZ: + Cond[0].setImm(Xtensa::BGEZ); + return false; + case Xtensa::BGEZ: + Cond[0].setImm(Xtensa::BLTZ); + return false; + + case Xtensa::BF: + Cond[0].setImm(Xtensa::BT); + return false; + case Xtensa::BT: + Cond[0].setImm(Xtensa::BF); + return false; + default: + llvm_unreachable("Invalid branch condition!"); + } +} + +MachineBasicBlock * +XtensaInstrInfo::getBranchDestBlock(const MachineInstr &MI) const { + unsigned OpCode = MI.getOpcode(); + switch (OpCode) { + case Xtensa::BR_JT: + case Xtensa::JX: + return nullptr; + case Xtensa::J: + return MI.getOperand(0).getMBB(); + case Xtensa::BEQ: + case Xtensa::BNE: 
+ case Xtensa::BLT: + case Xtensa::BLTU: + case Xtensa::BGE: + case Xtensa::BGEU: + return MI.getOperand(2).getMBB(); + + case Xtensa::BEQI: + case Xtensa::BNEI: + case Xtensa::BLTI: + case Xtensa::BLTUI: + case Xtensa::BGEI: + case Xtensa::BGEUI: + return MI.getOperand(2).getMBB(); + + case Xtensa::BEQZ: + case Xtensa::BNEZ: + case Xtensa::BLTZ: + case Xtensa::BGEZ: + return MI.getOperand(1).getMBB(); + + case Xtensa::BT: + case Xtensa::BF: + return MI.getOperand(1).getMBB(); + + default: + llvm_unreachable("Unknown branch opcode"); + } +} + +unsigned XtensaInstrInfo::BranchType(unsigned OpCode) const { + switch (OpCode) { + case Xtensa::J: + case Xtensa::JX: + case Xtensa::BR_JT: + return UBRANCH; + case Xtensa::BEQ: + case Xtensa::BNE: + case Xtensa::BLT: + case Xtensa::BLTU: + case Xtensa::BGE: + case Xtensa::BGEU: + return CBRANCH_RR; + + case Xtensa::BEQI: + case Xtensa::BNEI: + case Xtensa::BLTI: + case Xtensa::BLTUI: + case Xtensa::BGEI: + case Xtensa::BGEUI: + return CBRANCH_RI; + + case Xtensa::BEQZ: + case Xtensa::BNEZ: + case Xtensa::BLTZ: + case Xtensa::BGEZ: + return CBRANCH_RZ; + + case Xtensa::BT: + case Xtensa::BF: + return CBRANCH_B; + + default: + llvm_unreachable("Unknown branch opcode!"); + return 0; + } +} + +bool XtensaInstrInfo::isBranchOffsetInRange(unsigned BranchOp, + int64_t BrOffset) const { + switch (BranchOp) { + case Xtensa::J: + BrOffset -= 4; + return isIntN(18, BrOffset); + case Xtensa::JX: + return true; + case Xtensa::BR_JT: + return true; + case Xtensa::BEQ: + case Xtensa::BNE: + case Xtensa::BLT: + case Xtensa::BLTU: + case Xtensa::BGE: + case Xtensa::BGEU: + case Xtensa::BEQI: + case Xtensa::BNEI: + case Xtensa::BLTI: + case Xtensa::BLTUI: + case Xtensa::BGEI: + case Xtensa::BGEUI: + BrOffset -= 4; + return isIntN(8, BrOffset); + case Xtensa::BEQZ: + case Xtensa::BNEZ: + case Xtensa::BLTZ: + case Xtensa::BGEZ: + BrOffset -= 4; + return isIntN(12, BrOffset); + case Xtensa::BT: + case Xtensa::BF: + BrOffset -= 4; + return isIntN(8, BrOffset); + default: + llvm_unreachable("Unknown branch opcode"); + } +} + +bool XtensaInstrInfo::analyzeBranch(MachineBasicBlock &MBB, + MachineBasicBlock *&TBB, + MachineBasicBlock *&FBB, + SmallVectorImpl &Cond, + bool AllowModify = false) const { + // Most of the code and comments here are boilerplate. + + // Start from the bottom of the block and work up, examining the + // terminator instructions. + MachineBasicBlock::iterator I = MBB.end(); + while (I != MBB.begin()) { + --I; + if (I->isDebugValue()) + continue; + + // Working from the bottom, when we see a non-terminator instruction, we're + // done. + if (!isUnpredicatedTerminator(*I)) + break; + + // A terminator that isn't a branch can't easily be handled by this + // analysis. + SmallVector ThisCond; + ThisCond.push_back(MachineOperand::CreateImm(0)); + const MachineOperand *ThisTarget; + if (!isBranch(I, ThisCond, ThisTarget)) + return true; + + // Can't handle indirect branches. + if (!ThisTarget->isMBB()) + return true; + + if (ThisCond[0].getImm() == Xtensa::J) { + // Handle unconditional branches. + if (!AllowModify) { + TBB = ThisTarget->getMBB(); + continue; + } + + // If the block has any instructions after a JMP, delete them. + while (std::next(I) != MBB.end()) + std::next(I)->eraseFromParent(); + + Cond.clear(); + FBB = 0; + + // TBB is used to indicate the unconditinal destination. + TBB = ThisTarget->getMBB(); + continue; + } + + // Working from the bottom, handle the first conditional branch. 
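+    // Record the branch opcode and its operands in Cond so that insertBranch
+    // can rebuild an equivalent branch later.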
+ if (Cond.empty()) { + // FIXME: add X86-style branch swap + FBB = TBB; + TBB = ThisTarget->getMBB(); + Cond.push_back(MachineOperand::CreateImm(ThisCond[0].getImm())); + + // push remaining operands + for (unsigned int i = 0; i < (I->getNumExplicitOperands() - 1); i++) + Cond.push_back(I->getOperand(i)); + + continue; + } + + // Handle subsequent conditional branches. + assert(Cond.size() <= 4); + assert(TBB); + + // Only handle the case where all conditional branches branch to the same + // destination. + if (TBB != ThisTarget->getMBB()) + return true; + + // If the conditions are the same, we can leave them alone. + unsigned OldCond = Cond[0].getImm(); + if (OldCond == ThisCond[0].getImm()) + continue; + } + + return false; +} + +unsigned XtensaInstrInfo::removeBranch(MachineBasicBlock &MBB, + int *BytesRemoved) const { + // Most of the code and comments here are boilerplate. + MachineBasicBlock::iterator I = MBB.end(); + unsigned Count = 0; + if (BytesRemoved) + *BytesRemoved = 0; + + while (I != MBB.begin()) { + --I; + SmallVector Cond; + Cond.push_back(MachineOperand::CreateImm(0)); + const MachineOperand *Target; + if (!isBranch(I, Cond, Target)) + break; + if (!Target->isMBB()) + break; + // Remove the branch. + if (BytesRemoved) + *BytesRemoved += getInstSizeInBytes(*I); + I->eraseFromParent(); + I = MBB.end(); + ++Count; + } + return Count; +} + +unsigned XtensaInstrInfo::insertBranch( + MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, + ArrayRef Cond, const DebugLoc &DL, int *BytesAdded) const { + unsigned Count = 0; + if (BytesAdded) + *BytesAdded = 0; + if (FBB) { + // Need to build two branches then + // one to branch to TBB on Cond + // and a second one immediately after to unconditionally jump to FBB + Count = InsertBranchAtInst(MBB, MBB.end(), TBB, Cond, DL, BytesAdded); + auto &MI = *BuildMI(&MBB, DL, get(Xtensa::J)).addMBB(FBB); + Count++; + if (BytesAdded) + *BytesAdded += getInstSizeInBytes(MI); + return Count; + } + // This function inserts the branch at the end of the MBB + Count += InsertBranchAtInst(MBB, MBB.end(), TBB, Cond, DL, BytesAdded); + return Count; +} + +unsigned XtensaInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB, + MachineBasicBlock &DestBB, + const DebugLoc &DL, + int64_t BrOffset, + RegScavenger *RS) const { + assert(RS && "RegScavenger required for long branching"); + assert(MBB.empty() && + "new block should be inserted for expanding unconditional branch"); + assert(MBB.pred_size() == 1); + + MachineFunction *MF = MBB.getParent(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + MachineConstantPool *ConstantPool = MF->getConstantPool(); + + if (!isInt<32>(BrOffset)) + report_fatal_error( + "Branch offsets outside of the signed 32-bit range not supported"); + XtensaConstantPoolValue *C = + XtensaConstantPoolMBB::Create(MF->getFunction().getContext(), &DestBB, 0); + unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4); + + // FIXME: A virtual register must be used initially, as the register + // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch + // uses the same workaround). 
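+  // Load the destination address from the new constant pool entry with L32R,
+  // then jump to it indirectly with JX.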
+ Register ScratchReg = MRI.createVirtualRegister(&Xtensa::ARRegClass); + auto II = MBB.end(); + + MachineInstr &L32R = *BuildMI(MBB, II, DL, get(Xtensa::L32R), ScratchReg) + .addConstantPoolIndex(Idx); + BuildMI(MBB, II, DL, get(Xtensa::JX)).addReg(ScratchReg, RegState::Kill); + RS->enterBasicBlockEnd(MBB); + unsigned Scav = RS->scavengeRegisterBackwards(Xtensa::ARRegClass, + L32R.getIterator(), false, 0); + MRI.replaceRegWith(ScratchReg, Scav); + MRI.clearVirtRegs(); + RS->setRegUsed(Scav); + return 3 + 3; +} + +unsigned XtensaInstrInfo::InsertConstBranchAtInst( + MachineBasicBlock &MBB, MachineInstr *I, int64_t offset, + ArrayRef Cond, DebugLoc DL, int *BytesAdded) const { + // Shouldn't be a fall through. + assert(&MBB && "InsertBranch must not be told to insert a fallthrough"); + assert(Cond.size() <= 4 && + "Xtensa branch conditions have less than four components!"); + + if (Cond.empty() || (Cond[0].getImm() == Xtensa::J)) { + // Unconditional branch + MachineInstr *MI = BuildMI(MBB, I, DL, get(Xtensa::J)).addImm(offset); + if (BytesAdded && MI) + *BytesAdded += getInstSizeInBytes(*MI); + return 1; + } + + unsigned Count = 0; + unsigned BR_C = Cond[0].getImm(); + unsigned BRANCH_TYPE = BranchType(BR_C); + MachineInstr *MI = nullptr; + switch (BRANCH_TYPE) { + case CBRANCH_RR: + MI = BuildMI(MBB, I, DL, get(BR_C)) + .addImm(offset) + .addReg(Cond[1].getReg()) + .addReg(Cond[2].getReg()); + break; + case CBRANCH_RI: + MI = BuildMI(MBB, I, DL, get(BR_C)) + .addImm(offset) + .addReg(Cond[1].getReg()) + .addImm(Cond[2].getImm()); + break; + case CBRANCH_RZ: + MI = BuildMI(MBB, I, DL, get(BR_C)).addImm(offset).addReg(Cond[1].getReg()); + break; + case CBRANCH_B: + MI = BuildMI(MBB, I, DL, get(BR_C)).addImm(offset).addReg(Cond[1].getReg()); + break; + default: + llvm_unreachable("Invalid branch type!"); + } + if (BytesAdded && MI) + *BytesAdded += getInstSizeInBytes(*MI); + ++Count; + return Count; +} + +unsigned XtensaInstrInfo::InsertBranchAtInst(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + MachineBasicBlock *TBB, + ArrayRef Cond, + const DebugLoc &DL, + int *BytesAdded) const { + // Shouldn't be a fall through. 
+ assert(TBB && "InsertBranch must not be told to insert a fallthrough"); + assert(Cond.size() <= 4 && + "Xtensa branch conditions have less than four components!"); + + if (Cond.empty() || (Cond[0].getImm() == Xtensa::J)) { + // Unconditional branch + MachineInstr *MI = BuildMI(MBB, I, DL, get(Xtensa::J)).addMBB(TBB); + if (BytesAdded && MI) + *BytesAdded += getInstSizeInBytes(*MI); + return 1; + } + + unsigned Count = 0; + unsigned BR_C = Cond[0].getImm(); + unsigned BRANCH_TYPE = BranchType(BR_C); + MachineInstr *MI = nullptr; + switch (BRANCH_TYPE) { + case CBRANCH_RR: + MI = BuildMI(MBB, I, DL, get(BR_C)) + .addReg(Cond[1].getReg()) + .addReg(Cond[2].getReg()) + .addMBB(TBB); + break; + case CBRANCH_RI: + MI = BuildMI(MBB, I, DL, get(BR_C)) + .addReg(Cond[1].getReg()) + .addImm(Cond[2].getImm()) + .addMBB(TBB); + break; + case CBRANCH_RZ: + MI = BuildMI(MBB, I, DL, get(BR_C)).addReg(Cond[1].getReg()).addMBB(TBB); + break; + case CBRANCH_B: + MI = BuildMI(MBB, I, DL, get(BR_C)).addReg(Cond[1].getReg()).addMBB(TBB); + break; + default: + llvm_unreachable("Invalid branch type!"); + } + if (BytesAdded && MI) + *BytesAdded += getInstSizeInBytes(*MI); + ++Count; + return Count; +} + +bool XtensaInstrInfo::isBranch(const MachineBasicBlock::iterator &MI, + SmallVectorImpl &Cond, + const MachineOperand *&Target) const { + unsigned OpCode = MI->getOpcode(); + switch (OpCode) { + case Xtensa::J: + case Xtensa::JX: + case Xtensa::BR_JT: + Cond[0].setImm(OpCode); + Target = &MI->getOperand(0); + return true; + case Xtensa::BEQ: + case Xtensa::BNE: + case Xtensa::BLT: + case Xtensa::BLTU: + case Xtensa::BGE: + case Xtensa::BGEU: + Cond[0].setImm(OpCode); + Target = &MI->getOperand(2); + return true; + + case Xtensa::BEQI: + case Xtensa::BNEI: + case Xtensa::BLTI: + case Xtensa::BLTUI: + case Xtensa::BGEI: + case Xtensa::BGEUI: + Cond[0].setImm(OpCode); + Target = &MI->getOperand(2); + return true; + + case Xtensa::BEQZ: + case Xtensa::BNEZ: + case Xtensa::BLTZ: + case Xtensa::BGEZ: + Cond[0].setImm(OpCode); + Target = &MI->getOperand(1); + return true; + + case Xtensa::BT: + case Xtensa::BF: + Cond[0].setImm(OpCode); + Target = &MI->getOperand(1); + return true; + + default: + assert(!MI->getDesc().isBranch() && "Unknown branch opcode"); + return false; + } +} diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.h b/llvm/lib/Target/Xtensa/XtensaInstrInfo.h new file mode 100644 index 0000000000000..1312733fa7129 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.h @@ -0,0 +1,108 @@ +//===-- XtensaInstrInfo.h - Xtensa Instruction Information ----------*- C++ +//-*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===--------------------------------------------------------------------------===// +// +// This file contains the Xtensa implementation of the TargetInstrInfo class. 
+// +//===--------------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_XTENSA_XTENSAINSTRINFO_H +#define LLVM_LIB_TARGET_XTENSA_XTENSAINSTRINFO_H + +#include "Xtensa.h" +#include "XtensaRegisterInfo.h" +#include "llvm/CodeGen/TargetInstrInfo.h" +#include "llvm/CodeGen/TargetRegisterInfo.h" + +#define GET_INSTRINFO_HEADER + +#include "XtensaGenInstrInfo.inc" + +namespace llvm { + +class XtensaTargetMachine; +class XtensaSubtarget; +class XtensaInstrInfo : public XtensaGenInstrInfo { + const XtensaRegisterInfo RI; + XtensaSubtarget &STI; + +public: + XtensaInstrInfo(XtensaSubtarget &STI); + + void adjustStackPtr(unsigned SP, int64_t Amount, MachineBasicBlock &MBB, + MachineBasicBlock::iterator I) const; + unsigned getInstSizeInBytes(const MachineInstr &MI) const override; + + // Return the XtensaRegisterInfo, which this class owns. + const XtensaRegisterInfo &getRegisterInfo() const { return RI; } + + void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, + const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, + bool KillSrc) const override; + void storeRegToStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, unsigned SrcReg, + bool isKill, int FrameIndex, + const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI) const override; + void loadRegFromStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, unsigned DestReg, + int FrameIdx, const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI) const override; + + // Get the load and store opcodes for a given register class and offset. + void getLoadStoreOpcodes(const TargetRegisterClass *RC, unsigned &LoadOpcode, + unsigned &StoreOpcode, int64_t offset) const; + + // Emit code before MBBI in MI to move immediate value Value into + // physical register Reg. + void loadImmediate(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, + unsigned *Reg, int64_t Value) const; + bool + reverseBranchCondition(SmallVectorImpl &Cond) const override; + MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override; + + bool isBranchOffsetInRange(unsigned BranchOpc, + int64_t BrOffset) const override; + bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, + MachineBasicBlock *&FBB, + SmallVectorImpl &Cond, + bool AllowModify) const override; + unsigned removeBranch(MachineBasicBlock &MBB, + int *BytesRemoved = nullptr) const override; + unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, + MachineBasicBlock *FBB, ArrayRef Cond, + const DebugLoc &DL, + int *BytesAdded = nullptr) const override; + unsigned insertIndirectBranch(MachineBasicBlock &MBB, + MachineBasicBlock &NewDestBB, + const DebugLoc &DL, int64_t BrOffset = 0, + RegScavenger *RS = nullptr) const override; + unsigned InsertBranchAtInst(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + MachineBasicBlock *TBB, + ArrayRef Cond, const DebugLoc &DL, + int *BytesAdded) const; + unsigned InsertConstBranchAtInst(MachineBasicBlock &MBB, MachineInstr *I, + int64_t offset, + ArrayRef Cond, DebugLoc DL, + int *BytesAdded) const; + // Return true if MI is a conditional or unconditional branch. + // When returning true, set Cond to the mask of condition-code + // values on which the instruction will branch, and set Target + // to the operand that contains the branch target. This target + // can be a register or a basic block. 
+ bool isBranch(const MachineBasicBlock::iterator &MI, + SmallVectorImpl &Cond, + const MachineOperand *&Target) const; + + unsigned BranchType(unsigned BR_CODE) const; +}; +} // end namespace llvm + +#endif /* LLVM_LIB_TARGET_XTENSA_XTENSAINSTRINFO_H */ diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td new file mode 100644 index 0000000000000..41b7d50278530 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -0,0 +1,1502 @@ +//===- XtensaInstrInfo.td - Target Description for Xtensa Target -*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------------===// +// +// This file describes the Xtensa instructions in TableGen format. +// +//===----------------------------------------------------------------------------===// + +include "XtensaInstrFormats.td" +include "XtensaOperands.td" +include "XtensaOperators.td" + +//===----------------------------------------------------------------------===// +// Arithmetic & Logical instructions +//===----------------------------------------------------------------------===// + +class ArithLogic_RRR oper2, bits<4> oper1, string instrAsm, + SDPatternOperator opNode, bit isComm = 0> + : RRR_Inst<0x00, oper1, oper2, (outs AR:$r), (ins AR:$s, AR:$t), + instrAsm#"\t$r, $s, $t", + [(set AR:$r, (opNode AR:$s, AR:$t))]> +{ + let isCommutable = isComm; + let isReMaterializable = 0; +} + +def ADD: ArithLogic_RRR<0x08, 0x00, "add", add, 1>; +def SUB: ArithLogic_RRR<0x0C, 0x00, "sub", sub>; +def AND: ArithLogic_RRR<0x01, 0x00, "and", and, 1>; +def OR: ArithLogic_RRR<0x02, 0x00, "or", or, 1>; +def XOR: ArithLogic_RRR<0x03, 0x00, "xor", xor, 1>; + +class ADDX oper, string instrAsm, list pattern> + : RRR_Inst<0x00, 0x00, oper, (outs AR:$r), (ins AR:$s, AR:$t), + instrAsm#"\t$r, $s, $t", pattern>; + +def ADDX2: ADDX<0x09, "addx2", [(set AR:$r, (add AR:$t, (shl AR:$s, (i32 1))))]>; +def ADDX4: ADDX<0x0A, "addx4", [(set AR:$r, (add AR:$t, (shl AR:$s, (i32 2))))]>; +def ADDX8: ADDX<0x0B, "addx8", [(set AR:$r, (add AR:$t, (shl AR:$s, (i32 3))))]>; + +class SUBX oper, string instrAsm, list pattern> + : RRR_Inst<0x00, 0x00, oper, (outs AR:$r), (ins AR:$s, AR:$t), + instrAsm#"\t$r, $s, $t", pattern>; + +def SUBX2: SUBX<0x0D, "subx2", [(set AR:$r, (sub (shl AR:$s, (i32 1)), AR:$t))]>; +def SUBX4: SUBX<0x0E, "subx4", [(set AR:$r, (sub (shl AR:$s, (i32 2)), AR:$t))]>; +def SUBX8: SUBX<0x0F, "subx8", [(set AR:$r, (sub (shl AR:$s, (i32 3)), AR:$t))]>; + +def ABS: RRR_Inst<0x00, 0x00, 0x06, (outs AR:$r), (ins AR:$t), + "abs\t$r, $t", []> +{ + let s = 0x1; +} + +def ADDI: RRI8_Inst<0x02, (outs AR:$t), (ins AR:$s, imm8:$imm8), + "addi\t$t, $s, $imm8", + [(set AR:$t, (add AR:$s, imm8:$imm8))]> +{ + let r = 0x0C; +} + +def ADDMI: RRI8_Inst<0x02, (outs AR:$t), (ins AR:$s, imm8_sh8:$imm_sh8), + "addmi\t$t, $s, $imm_sh8", + [(set AR:$t, (add AR:$s, imm8_sh8:$imm_sh8))]> +{ + bits<16> imm_sh8; + + let r = 0x0D; + let imm8 = imm_sh8{15-8}; +} + +def NEG: RRR_Inst<0x00, 0x00, 0x06, (outs AR:$r), (ins AR:$t), + "neg\t$r, $t", + [(set AR:$r, (ineg AR:$t))]> +{ + let s = 0x00; +} + +//===----------------------------------------------------------------------===// +// Move instructions +//===----------------------------------------------------------------------===// +def MOVI: RRI8_Inst<0x02, (outs AR:$t), (ins imm12m:$imm), + 
"movi\t$t, $imm", + [(set AR:$t, imm12m:$imm)]> +{ + bits<12> imm; + + let imm8{7-0} = imm{7-0}; + let s{3-0} = imm{11-8}; + let r = 0xa; +} + +def MOVEQZ : RRR_Inst<0x00, 0x03, 0x08, (outs AR:$r), (ins AR:$s, AR:$t), + "moveqz\t$r, $s, $t", []>; +def MOVNEZ : RRR_Inst<0x00, 0x03, 0x09, (outs AR:$r), (ins AR:$s, AR:$t), + "movnez\t$r, $s, $t", []>; +def MOVLTZ : RRR_Inst<0x00, 0x03, 0x0A, (outs AR:$r), (ins AR:$s, AR:$t), + "movltz\t$r, $s, $t", []>; +def MOVGEZ : RRR_Inst<0x00, 0x03, 0x0B, (outs AR:$r), (ins AR:$s, AR:$t), + "movgez\t$r, $s, $t", []>; + +//===----------------------------------------------------------------------===// +// Shift instructions +//===----------------------------------------------------------------------===// +/* +let Uses = [SAR] in +{ + def SLL: RRR_Inst<0x00, 0x01, 0x0A, (outs AR:$r), (ins AR:$s), + "sll\t$r, $s", []> + { + let t = 0x00; + } + + def SRA: RRR_Inst<0x00, 0x01, 0x0B, (outs AR:$r), (ins AR:$t), + "sra\t$r, $t", []> + { + let s = 0x00; + } + + def SRC: RRR_Inst<0x00, 0x01, 0x08, (outs AR:$r), (ins AR:$s, AR:$t), + "src\t$r, $s, $t", []>; + + def SRL: RRR_Inst<0x00, 0x01, 0x09, (outs AR:$r), (ins AR:$t), + "srl\t$r, $t", []> + { + let s = 0x00; + } +} + +let Defs = [SAR] in +{ + def SSL: RRR_Inst<0x00, 0x00, 0x04, (outs), (ins AR:$s), + "ssl\t$s", []> + { + let r = 0x01; + let t = 0x00; + } + + def SSR: RRR_Inst<0x00, 0x00, 0x04, (outs), (ins AR:$s), + "ssr\t$s", []> + { + let r = 0x00; + let t = 0x00; + } +}*/ + +def EXTUI: RRR_Inst<0x00, 0x04, 0x00, (outs AR:$r), (ins AR:$t, uimm5:$imm1, imm1_16:$imm2), + "extui\t$r, $t, $imm1, $imm2", + []> +{ + bits<5> imm1; + bits<4> imm2; + + let s = imm1{3-0}; + let Inst{16} = imm1{4}; + let Inst{23-20} = imm2; +} + +def SRAI: RRR_Inst<0x00, 0x01, 0x02, (outs AR:$r), (ins AR:$t, uimm5:$sa), + "srai\t$r, $t, $sa", + [(set AR:$r, (sra AR:$t, uimm5:$sa))]> +{ + bits<5> sa; + + let Inst{20} = sa{4}; + let s = sa{3-0}; +} + +def SRLI: RRR_Inst<0x00, 0x01, 0x04, (outs AR:$r), (ins AR:$t, uimm4:$sa), + "srli\t$r, $t, $sa", + [(set AR:$r, (srl AR:$t, uimm4:$sa))]> +{ + bits<4> sa; + + let s = sa; +} + +def SLLI: RRR_Inst<0x00, 0x01, 0x00, (outs AR:$r), (ins AR:$s, shimm1_31:$sa), + "slli\t$r, $s, $sa", + [(set AR:$r, (shl AR:$s, shimm1_31:$sa))]> +{ + bits<5> sa; + + let Inst{20} = sa{4}; + let t = sa{3-0}; +} + +def SSA8L : RRR_Inst<0x00, 0x00, 0x04, (outs), (ins AR:$s), + "ssa8l\t$s", []> +{ + let r = 0x2; + let t = 0x0; +} + +def SSAI: RRR_Inst<0x00, 0x00, 0x04, (outs), (ins uimm5:$imm), + "ssai\t$imm", []> +{ + bits<5> imm; + + let r = 0x04; + let s = imm{3-0}; + let t{3-1} = 0; + let t{0} = imm{4}; +} + +//===----------------------------------------------------------------------===// +// Load and store instructions +//===----------------------------------------------------------------------===// + +// Load instructions +let mayLoad = 1, usesCustomInserter = 1 in +{ + + class Load_RRI8 oper, string instrAsm, SDPatternOperator opNode, + ComplexPattern addrOp, Operand memOp> + : RRI8_Inst<0x02, (outs AR:$t), (ins memOp:$addr), + instrAsm#"\t$t, $addr", + [(set AR:$t, (opNode addrOp:$addr))]> + { + bits<12> addr; + + let r = oper; + let imm8{7-0} = addr{11-4}; + let s{3-0} = addr{3-0}; + } +} + +def L8UI: Load_RRI8<0x00, "l8ui", zextloadi8, addr_ish1, mem8>; +def L16SI: Load_RRI8<0x09, "l16si", sextloadi16, addr_ish2, mem16>; +def L16UI: Load_RRI8<0x01, "l16ui", zextloadi16, addr_ish2, mem16>; +def L32I: Load_RRI8<0x02, "l32i", load, addr_ish4, mem32>; + +// Store instructions +let mayStore = 1, usesCustomInserter 
= 1 in +{ + class Store_II8 oper, string instrAsm, SDPatternOperator opNode, + ComplexPattern addrOp, Operand memOp> + : RRI8_Inst<0x02, (outs), (ins AR:$t, memOp:$addr), + instrAsm#"\t$t, $addr", + [(opNode AR:$t, addrOp:$addr)]> + { + bits<12> addr; + + let r = oper; + let imm8{7-0} = addr{11-4}; + let s{3-0} = addr{3-0}; + } +} + +def S8I: Store_II8<0x04, "s8i", truncstorei8, addr_ish1, mem8>; +def S16I: Store_II8<0x05, "s16i", truncstorei16, addr_ish2, mem16>; +def S32I: Store_II8<0x06, "s32i", store, addr_ish4, mem32>; + +def L32R: RI16_Inst<0x01, + (outs AR:$t), (ins L32Rtarget:$label), + "l32r\t$t, $label", []> +{ + bits<16> label; + let imm16 = label; +} + +//pcrel addr loading using L32R +def : Pat<(Xtensa_pcrel_wrapper tconstpool:$in), (L32R tconstpool:$in)>; + +//===----------------------------------------------------------------------===// +// Conditional branch instructions +//===----------------------------------------------------------------------===// +let isBranch = 1, isTerminator = 1 in +{ + class Branch_RR oper, string instrAsm, CondCode CC> + : RRI8_Inst<0x07, (outs), + (ins AR:$s, AR:$t, brtarget:$target), + instrAsm#"\t$s, $t, $target", + [(brcc CC, AR:$s, AR:$t, bb:$target)]> + { + bits<8> target; + + let r = oper; + let imm8 = target; + } + + class Branch_RI oper, string instrAsm, CondCode CC> + : RRI8_Inst<0x06, (outs), + (ins AR:$s, b4const:$imm, brtarget:$target), + instrAsm#"\t$s, $imm, $target", + [(brcc CC, AR:$s, b4const:$imm, bb:$target)]> + { + bits<4> imm; + bits<8> target; + + let t = oper; + let r = imm; + let imm8 = target; + } + + class Branch_RIU oper, string instrAsm, CondCode CC> + : RRI8_Inst<0x06, (outs), + (ins AR:$s, b4constu:$imm, brtarget:$target), + instrAsm#"\t$s, $imm, $target", + [(brcc CC, AR:$s, b4constu:$imm, bb:$target)]> + { + bits<4> imm; + bits<8> target; + + let t = oper; + let r = imm; + let imm8 = target; + } + + class Branch_RZ n, bits<2> m, string instrAsm, CondCode CC> + : BRI12_Inst<0x06, n, m, (outs), + (ins AR:$s, brtarget:$target), + instrAsm#"\t$s, $target", + [(brcc CC, AR:$s, (i32 0), bb:$target)]> + { + bits<12> target; + + let imm12 = target; + } +} + +def BEQ: Branch_RR<0x01, "beq", SETEQ>; +def BNE: Branch_RR<0x09, "bne", SETNE>; +def BGE: Branch_RR<0x0A, "bge", SETGE>; +def BLT: Branch_RR<0x02, "blt", SETLT>; +def BGEU: Branch_RR<0x0B, "bgeu", SETUGE>; +def BLTU: Branch_RR<0x03, "bltu", SETULT>; + +def BEQI: Branch_RI<0x02, "beqi", SETEQ>; +def BNEI: Branch_RI<0x06, "bnei", SETNE>; +def BGEI: Branch_RI<0x0E, "bgei", SETGE>; +def BLTI: Branch_RI<0x0A, "blti", SETLT>; +def BGEUI: Branch_RIU<0x0F, "bgeui", SETUGE>; +def BLTUI: Branch_RIU<0x0B, "bltui", SETULT>; + +def BEQZ: Branch_RZ<0x01, 0x00, "beqz", SETEQ>; +def BNEZ: Branch_RZ<0x01, 0x01, "bnez", SETNE>; +def BGEZ: Branch_RZ<0x01, 0x03, "bgez", SETGE>; +def BLTZ: Branch_RZ<0x01, 0x02, "bltz", SETLT>; + +def BALL: RRI8_Inst<0x07, (outs), + (ins AR:$s, AR:$t, brtarget:$target), + "ball\t$s, $t, $target", []> +{ + bits<8> target; + + let r = 0x04; + let imm8 = target; +} + +def BANY: RRI8_Inst<0x07, (outs), + (ins AR:$s, AR:$t, brtarget:$target), + "bany\t$s, $t, $target", []> +{ + bits<8> target; + + let r = 0x08; + let imm8 = target; +} + +def BBC: RRI8_Inst<0x07, (outs), + (ins AR:$s, AR:$t, brtarget:$target), + "bbc\t$s, $t, $target", []> +{ + bits<8> target; + + let r = 0x05; + let imm8 = target; +} + +def BBS: RRI8_Inst<0x07, (outs), + (ins AR:$s, AR:$t, brtarget:$target), + "bbs\t$s, $t, $target", []> +{ + bits<8> target; + + let r = 0x0d; + let imm8 = 
target; +} + +def BNALL: RRI8_Inst<0x07, (outs), + (ins AR:$s, AR:$t, brtarget:$target), + "bnall\t$s, $t, $target", []> +{ + bits<8> target; + + let r = 0x0c; + let imm8 = target; +} + +def BNONE: RRI8_Inst<0x07, (outs), + (ins AR:$s, AR:$t, brtarget:$target), + "bnone\t$s, $t, $target", []> +{ + bits<8> target; + + let r = 0x00; + let imm8 = target; +} + +def BBCI: RRI8_Inst<0x07, (outs), + (ins AR:$s, uimm5:$imm, brtarget:$target), + "bbci\t$s, $imm, $target", []> +{ + bits<8> target; + bits<5> imm; + + let r{3-1} = 0x3; + let r{0} = imm{4}; + let t{3-0} = imm{3-0}; + let imm8 = target; +} + +def BBSI: RRI8_Inst<0x07, (outs), + (ins AR:$s, uimm5:$imm, brtarget:$target), + "bbsi\t$s, $imm, $target", []> +{ + bits<8> target; + bits<5> imm; + + let r{3-1} = 0x7; + let r{0} = imm{4}; + let t{3-0} = imm{3-0}; + let imm8 = target; +} + +def : Pat<(brcc SETGT, AR:$s, AR:$t, bb:$target), + (BLT AR:$t, AR:$s, bb:$target)>; +def : Pat<(brcc SETUGT, AR:$s, AR:$t, bb:$target), + (BLTU AR:$t, AR:$s, bb:$target)>; +def : Pat<(brcc SETLE, AR:$s, AR:$t, bb:$target), + (BGE AR:$t, AR:$s, bb:$target)>; +def : Pat<(brcc SETULE, AR:$s, AR:$t, bb:$target), + (BGEU AR:$t, AR:$s, bb:$target)>; + +def : Pat<(brcond (i32 (seteq AR:$s, AR:$t)), bb:$target), + (BEQ AR:$s, AR:$t, bb:$target)>; +def : Pat<(brcond (i32 (setne AR:$s, AR:$t)), bb:$target), + (BNE AR:$s, AR:$t, bb:$target)>; +def : Pat<(brcond (i32 (setge AR:$s, AR:$t)), bb:$target), + (BGE AR:$s, AR:$t, bb:$target)>; +def : Pat<(brcond (i32 (setle AR:$s, AR:$t)), bb:$target), + (BLT AR:$s, AR:$t, bb:$target)>; +def : Pat<(brcond (i32 (setuge AR:$s, AR:$t)), bb:$target), + (BGEU AR:$s, AR:$t, bb:$target)>; +def : Pat<(brcond (i32 (setult AR:$s, AR:$t)), bb:$target), + (BLTU AR:$s, AR:$t, bb:$target)>; +def : Pat<(brcond (i32 (setgt AR:$s, AR:$t)), bb:$target), + (BLT AR:$t, AR:$s, bb:$target)>; +def : Pat<(brcond (i32 (setugt AR:$s, AR:$t)), bb:$target), + (BLTU AR:$t, AR:$s, bb:$target)>; +def : Pat<(brcond (i32 (setle AR:$s, AR:$t)), bb:$target), + (BGE AR:$t, AR:$s, bb:$target)>; +def : Pat<(brcond (i32 (setule AR:$s, AR:$t)), bb:$target), + (BGEU AR:$t, AR:$s, bb:$target)>; + +//===----------------------------------------------------------------------===// +// Call and jump instructions +//===----------------------------------------------------------------------===// + +let isBranch = 1, isTerminator = 1, isBarrier = 1 in +{ + def J: CALL_Inst<0x06, (outs), (ins jumptarget:$offset), + "j\t$offset", + [(br bb:$offset)]> + { + let n = 0x0; + } + + def JX: CALLX_Inst<0x00, 0x00, 0x00, (outs), (ins AR:$s), + "jx\t$s", + [(brind AR:$s)]> + { + let m = 0x2; + let n = 0x2; + let r = 0; + let isIndirectBranch = 1; + } +} + +let isCall = 1, Defs = [A0] in +{ + def CALL0: CALL_Inst<0x05, (outs), (ins pcrel32call:$offset), + "call0\t$offset", []> + { + let n = 0; + } + + let isIndirectBranch = 1 in + { + def CALLX0: CALLX_Inst<0x00, 0x00, 0x00, (outs), (ins AR:$s), + "callx0\t$s", [(Xtensa_call AR:$s)]> + { + let m = 0x3; + let n = 0x0; + let r = 0; + } + } +} + +let isReturn = 1, isTerminator = 1, + isBarrier = 1, hasCtrlDep = 1, Uses = [A0] in +{ + + def RET: CALLX_Inst<0x00, 0x00, 0x00, (outs), (ins), + "ret", [(Xtensa_retflag)]> + { + let m = 0x2; + let n = 0x0; + let s = 0; + let r = 0; + } +} + +// calls +def : Pat<(Xtensa_call AR:$dst), + (CALLX0 AR:$dst)>; + +//===----------------------------------------------------------------------===// +// Mem barrier instructions +//===----------------------------------------------------------------------===// + 
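+// MEMW orders all memory accesses before it ahead of all memory accesses
+// after it; the custom inserter also emits it in front of volatile loads and
+// stores.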
+def MEMW: RRR_Inst<0x00, 0x00, 0x00, (outs), (ins), + "memw", [(Xtensa_mem_barrier)]> +{ + let r = 0x2; + let t = 0x0c; + let s = 0x0; + let hasSideEffects = 1; +} + +def EXTW : RRR_Inst<0x00, 0x00, 0x00, (outs), (ins), + "extw", []> +{ + let r = 0x2; + let s = 0x0; + let t = 0xd; +} + +//===----------------------------------------------------------------------===// +// Processor control instructions +//===----------------------------------------------------------------------===// + +def DSYNC: RRR_Inst<0x00, 0x00, 0x00, (outs), (ins), + "dsync", []> +{ + let r = 0x2; + let s = 0x0; + let t = 0x3; +} + +def ISYNC: RRR_Inst<0x00, 0x00, 0x00, (outs), (ins), + "isync", []> +{ + let r = 0x2; + let s = 0x0; + let t = 0x0; +} + +def RSYNC: RRR_Inst<0x00, 0x00, 0x00, (outs), (ins), + "rsync", []> +{ + let r = 0x2; + let s = 0x0; + let t = 0x1; +} + +def ESYNC: RRR_Inst<0x00, 0x00, 0x00, (outs), (ins), + "esync", []> +{ + let r = 0x2; + let s = 0x0; + let t = 0x2; +} + +def NOP: RRR_Inst<0x00, 0x00, 0x00, (outs), (ins), + "nop", + []> +{ + let r = 0x02; + let s = 0x00; + let t = 0x0f; +} + +def WSR: RSR_Inst<0x00, 0x03, 0x01, (outs SR:$sr), (ins AR:$t), + "wsr\t$t, $sr", []>; + +def RSR: RSR_Inst<0x00, 0x03, 0x00, (outs AR:$t), (ins SR:$sr), + "rsr\t$t, $sr", []>; + +def XSR: RSR_Inst<0x00, 0x01, 0x06, (outs), (ins AR:$t, SR:$sr), + "xsr\t$t, $sr", []>; + +def WUR: RRR_Inst<0x00, 0x03, 0x0F, (outs UR:$ur), (ins AR:$t), + "wur\t$t, $ur", []> +{ + bits<8> ur; + + let r = ur{7-4}; + let s = ur{3-0}; +} + +def RUR: RRR_Inst<0x00, 0x03, 0x0E, (outs AR:$r), (ins UR:$ur), + "rur\t$r, $ur", [(set AR:$r, (Xtensa_rur UR:$ur))]> +{ + bits<8> ur; + + let s = ur{7-4}; + let t = ur{3-0}; +} + +def RER: RRR_Inst<0x00, 0x00, 0x04, (outs AR:$t), (ins AR:$s), + "rer\t$t, $s", []> +{ + let r = 0x6; +} + +def WER: RRR_Inst<0x00, 0x00, 0x04, (outs), (ins AR:$t, AR:$s), + "wer\t$t, $s", []> +{ + let r = 0x7; + let hasSideEffects = 1; +} + +//===----------------------------------------------------------------------===// +// Stack allocation +//===----------------------------------------------------------------------===// + +// ADJCALLSTACKDOWN/UP implicitly use/def SP because they may be expanded into +// a stack adjustment and the codegen must know that they may modify the stack +// pointer before prolog-epilog rewriting occurs. 
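+// Both pseudos carry the call frame sizes as immediate operands, matching the
+// callseq_start/callseq_end nodes they are selected from.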
+let Defs = [SP], Uses = [SP] in +{ + def ADJCALLSTACKDOWN: Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2), + "#ADJCALLSTACKDOWN", + [(Xtensa_callseq_start timm:$amt1, timm:$amt2)]>; + def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2), + "#ADJCALLSTACKUP", + [(Xtensa_callseq_end timm:$amt1, timm:$amt2)]>; +} + +//===----------------------------------------------------------------------===// +// Generic select instruction +//===----------------------------------------------------------------------===// +let usesCustomInserter = 1 in +{ + def SELECT: Pseudo<(outs AR:$dst), (ins AR:$lhs, AR:$rhs, AR:$t, AR:$f, i32imm:$cond), + "!select $dst, $lhs, $rhs, $t, $f, $cond", + [(set AR:$dst, (Xtensa_select_cc AR:$lhs, AR:$rhs, AR:$t, AR:$f, imm:$cond))]>; +} + +//===----------------------------------------------------------------------===// +// Code Density instructions +//===----------------------------------------------------------------------===// + +class ArithLogic_RRRN oper0, string instrAsm, + SDPatternOperator opNode, bit isComm = 0> + : RRRN_Inst, Requires<[HasDensity]> +{ + let isCommutable = isComm; + let isReMaterializable = 0; +} + +def ADD_N: ArithLogic_RRRN<0x0a, "add.n", add, 1>; + +def ADDI_N: RRRN_Inst<0x0B, (outs AR:$r), (ins AR:$s, imm1n_15:$imm), + "addi.n\t$r, $s, $imm", + [(set AR:$r, (add AR:$s, imm1n_15:$imm))]>, Requires<[HasDensity]> +{ + bits<4> imm; + + let t = imm; +} + +def MOV_N: RRRN_Inst<0x0D, (outs AR:$t), (ins AR:$s), + "mov.n\t$t, $s", []>, Requires<[HasDensity]> +{ + let r = 0; +} + +def : InstAlias<"mov\t $t, $s", (OR AR:$t, AR:$s, AR:$s)>; + +def MOVI_N: RI7_Inst<0xc, 0x0, (outs AR:$s), (ins imm32n_95:$imm7), + "movi.n\t$s, $imm7", + [(set AR:$s, imm32n_95:$imm7)]>, Requires<[HasDensity]>; + +// Load instruction +let mayLoad = 1, usesCustomInserter = 1 in +{ + def L32I_N: RRRN_Inst<0x8, (outs AR:$t), (ins mem32n:$addr), + "l32i.n\t$t, $addr", []>, Requires<[HasDensity]> + { + bits<8> addr; + + let r{3-0} = addr{7-4}; + let s{3-0} = addr{3-0}; + } +} + +// Store instruction +let mayStore = 1, usesCustomInserter = 1 in +{ + def S32I_N: RRRN_Inst<0x9, (outs), (ins AR:$t, mem32n:$addr), + "s32i.n\t$t, $addr", []>, Requires<[HasDensity]> + { + bits<8> addr; + + let r{3-0} = addr{7-4}; + let s{3-0} = addr{3-0}; + } +} + +//Return instruction +let isReturn = 1, isTerminator = 1, + isBarrier = 1, hasCtrlDep = 1, Uses = [A0] in +{ + def RET_N: RRRN_Inst<0x0D, (outs), (ins), + "ret.n", [(Xtensa_retflag)]>, Requires<[HasDensity]> + { + let r = 0x0F; + let s = 0; + let t = 0; + } +} + +//===----------------------------------------------------------------------===// +// Windowed instructions +//===----------------------------------------------------------------------===// + +def ENTRY: BRI12_Inst<0x06, 0x3, 0x0, (outs), (ins AR:$s, entry_imm12:$imm), + "entry\t$s, $imm", []>, Requires<[HasWindowed]> +{ + bits<15> imm; + + let imm12{11-0} = imm{14-3}; + let Defs = [SP]; +} + +//Call instructions +let isCall = 1, Defs = [A0] in +{ + def CALL4: CALL_Inst<0x05, (outs), (ins pcrel32call:$offset), + "call4\t$offset", []>, Requires<[HasWindowed]> + { + let n = 1; + } + + def CALL8: CALL_Inst<0x05, (outs), (ins pcrel32call:$offset), + "call8\t$offset", []>, Requires<[HasWindowed]> + { + let n = 2; + } + + def CALL12: CALL_Inst<0x05, (outs), (ins pcrel32call:$offset), + "call12\t$offset", []>, Requires<[HasWindowed]> + { + let n = 3; + } + + let isIndirectBranch = 1 in + { + def CALLX4: CALLX_Inst<0x00, 0x00, 0x00, (outs), (ins AR:$s), + "callx4\t$s", []>, 
Requires<[HasWindowed]> + { + let m = 0x3; + let n = 0x1; + let r = 0; + } + + def CALLX8: CALLX_Inst<0x00, 0x00, 0x00, (outs), (ins AR:$s), + "callx8\t$s", []>, Requires<[HasWindowed]> + { + let m = 0x3; + let n = 0x2; + let r = 0; + } + + def CALLX12: CALLX_Inst<0x00, 0x00, 0x00, (outs), (ins AR:$s), + "callx12\t$s", []>, Requires<[HasWindowed]> + { + let m = 0x3; + let n = 0x3; + let r = 0; + } + } +} + +def MOVSP: RRR_Inst<0x00, 0x00, 0x00, (outs AR:$t), (ins AR:$s), + "movsp\t$t, $s", + [(set AR:$t, (Xtensa_movsp AR:$s))]>, Requires<[HasWindowed]> +{ + let r = 0x01; +} + +//Return instructions +let isReturn = 1, isTerminator = 1, + isBarrier = 1, hasCtrlDep = 1, Uses = [A0] in +{ + def RETW_N: RRRN_Inst<0x0D, (outs), (ins), + "retw.n", [(Xtensa_retWflag)]>, Requires<[HasWindowed, HasDensity]> + { + let r = 0x0F; + let s = 0; + let t = 1; + } + + def RETW: CALLX_Inst<0x00, 0x00, 0x00, (outs), (ins), + "retw", [(Xtensa_retWflag)]>, Requires<[HasWindowed]> + { + let m = 0x2; + let n = 0x1; + let s = 0; + let r = 0; + } +} + +//Store 32-bit for Window Exceptions +def S32E: RRI4_Inst<0x00, 0x09, (outs), (ins AR:$t, AR:$s, imm64n_4n:$imm), + "s32e\t$t $s $imm", []>, Requires<[HasWindowed]> +{ + bits<6> imm; + + let r = imm{5-2}; + let imm4 = 0x4; + let mayStore = 1; +} + +def L32E: RRI4_Inst<0x00, 0x09, (outs), (ins AR:$t, AR:$s, imm64n_4n:$imm), + "l32e\t$t $s $imm", []>, Requires<[HasWindowed]> +{ + bits<6> imm; + + let r = imm{5-2}; + let imm4 = 0x0; + let mayLoad = 1; +} + +//Return from window +def RFWU: RRR_Inst<0x00, 0x00, 0x00, (outs), (ins), + "rfwu", []>, Requires<[HasWindowed]> +{ + bits<4> imm; + + let r = 0x3; + let s = 0x5; + let t = 0x0; +} + +def RFWO: RRR_Inst<0x00, 0x00, 0x00, (outs), (ins), + "rfwo", []>, Requires<[HasWindowed]> +{ + bits<4> imm; + + let r = 0x3; + let s = 0x4; + let t = 0x0; +} + +//Rotate window +def ROTW: RRR_Inst<0x00, 0x00, 0x04, (outs), (ins imm8n_7:$imm), + "rotw\t$imm", []>, Requires<[HasWindowed]> +{ + bits<4> imm; + + let r = 0x8; + let s = 0x0; + let t = imm{3-0}; +} + +//===----------------------------------------------------------------------===// +// Floating-Point Instructions +//===----------------------------------------------------------------------===// + +class FPArith_RRR oper2, bits<4> oper1, string instrAsm, + SDPatternOperator opNode, bit isComm = 0> + : RRR_Inst<0x00, oper1, oper2, (outs FPR:$r), (ins FPR:$s, FPR:$t), + instrAsm#"\t$r, $s, $t", + [(set FPR:$r, (opNode FPR:$s, FPR:$t))]> +{ + let isCommutable = isComm; + let isReMaterializable = 0; + let Predicates = [HasSingleFloat]; +} + +def ADD_S: FPArith_RRR<0x00, 0x0A, "add.s", fadd, 1>; +def SUB_S: FPArith_RRR<0x01, 0x0A, "sub.s", fsub>; +def MUL_S: FPArith_RRR<0x02, 0x0A, "mul.s", fmul, 1>; + +def ABS_S: RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), + "abs.s\t$r, $s", + [(set FPR:$r, (fabs FPR:$s))]> +{ + let t = 0x01; +} + +def NEG_S: RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), + "neg.s\t$r, $s", + [(set FPR:$r, (fneg FPR:$s))]> +{ + let t = 0x06; +} + +def TRUNC_S: RRR_Inst<0x00, 0x0A, 0x09, (outs AR:$r), (ins FPR:$s), + "trunc.s\t$r, $s, 0", + [(set AR:$r, (fp_to_sint FPR:$s))]> +{ + let t = 0x00; +} + +def UTRUNC_S: RRR_Inst<0x00, 0x0A, 0x0e, (outs AR:$r), (ins FPR:$s), + "utrunc.s\t$r, $s, 0", + [(set AR:$r, (fp_to_uint FPR:$s))]> +{ + let t = 0x00; +} + +def FLOAT_S: RRR_Inst<0x00, 0x0A, 0x0c, (outs FPR:$r), (ins AR:$s), + "float.s\t$r, $s, 0", + [(set FPR:$r, (sint_to_fp AR:$s))]> +{ + let t = 0x00; +} + +def UFLOAT_S: RRR_Inst<0x00, 0x0A, 0x0D, 
(outs FPR:$r), (ins AR:$s), + "ufloat.s\t$r, $s, 0", + [(set FPR:$r, (uint_to_fp AR:$s))]> +{ + let t = 0x00; +} + +def RFR: RRR_Inst<0x00, 0x0A, 0x0f, (outs AR:$r), (ins FPR:$s), + "rfr\t$r, $s", + [(set AR:$r, (bitconvert FPR:$s))]> +{ + let t = 0x04; +} + +def WFR: RRR_Inst<0x00, 0x0A, 0x0f, (outs FPR:$r), (ins AR:$s), + "wfr\t$r, $s", + [(set FPR:$r, (bitconvert AR:$s))]> +{ + let t = 0x05; +} + +// FP load instructions +let mayLoad = 1, usesCustomInserter = 1, Predicates = [HasSingleFloat] in +{ + class LoadF_RRI8 oper, string instrAsm, SDPatternOperator opNode, + ComplexPattern addrOp,Operand memOp>: RRI8_Inst<0x03, (outs FPR:$t), (ins memOp:$addr), + instrAsm#"\t$t, $addr", + [(set FPR:$t, (opNode addrOp:$addr))]> + { + bits<12> addr; + + let r = oper; + let imm8{7-0} = addr{11-4}; + let s{3-0} = addr{3-0}; + } +} + +def L32F: LoadF_RRI8<0x00, "lsi", load, addr_ish4, mem32>, Requires<[]>; + +// FP store instructions +let mayStore = 1, usesCustomInserter = 1, Predicates = [HasSingleFloat] in +{ + class StoreF_RRI8 oper, string instrAsm, SDPatternOperator opNode, + ComplexPattern addrOp, Operand memOp>: RRI8_Inst<0x03, (outs), (ins FPR:$t, memOp:$addr), + instrAsm#"\t$t, $addr", + [(opNode FPR:$t, addrOp:$addr)]> + { + bits<12> addr; + + let r = oper; + let imm8{7-0} = addr{11-4}; + let s{3-0} = addr{3-0}; + } +} + +def S32F: StoreF_RRI8<0x04, "ssi", store, addr_ish4, mem32>; + +// FP compare instructions +let isCompare = 1, Predicates = [HasSingleFloat] in +{ + class FCompare oper2, bits<4> oper1, string instrAsm, + SDPatternOperator opNode, bit isComm = 0> + : RRR_Inst<0x00, oper1, oper2, (outs BR:$b), (ins FPR:$s, FPR:$t), + instrAsm#"\t$b, $s, $t", + [(set BR:$b, (opNode FPR:$s, FPR:$t))]> + { + let isCommutable = isComm; + let isReMaterializable = 0; + let Predicates = [HasSingleFloat]; + } +} + +def OEQ_S: FCompare<0x02, 0x0b, "oeq.s", Xtensa_cmpoeq, 1>; +def OLT_S: FCompare<0x04, 0x0b, "olt.s", Xtensa_cmpolt, 1>; +def OLE_S: FCompare<0x06, 0x0b, "ole.s", Xtensa_cmpole, 1>; + +def UEQ_S: FCompare<0x03, 0x0b, "ueq.s", Xtensa_cmpueq, 1>; +def ULT_S: FCompare<0x05, 0x0b, "ult.s", Xtensa_cmpult, 1>; +def ULE_S: FCompare<0x07, 0x0b, "ule.s", Xtensa_cmpule, 1>; +def UN_S: FCompare<0x01, 0x0b, "un.s", Xtensa_cmpuo, 1>; + +//FP complex operations +def MADD_S: RRR_Inst<0x00, 0x0A, 0x04, (outs FPR:$r), (ins FPR:$a, FPR:$s, FPR:$t), + "madd.s\t$r, $s, $t", + [(set FPR:$r, (Xtensa_madd FPR:$a, FPR:$s, FPR:$t))]>, Requires<[HasSingleFloat]> +{ + let isCommutable = 0; + let isReMaterializable = 0; + let Constraints = "$r = $a"; +} + +def MSUB_S: RRR_Inst<0x00, 0x0A, 0x05, (outs FPR:$r), (ins FPR:$a, FPR:$s, FPR:$t), + "msub.s\t$r, $s, $t", + [(set FPR:$r, (Xtensa_msub FPR:$a, FPR:$s, FPR:$t))]>, Requires<[HasSingleFloat]> +{ + let isCommutable = 0; + let isReMaterializable = 0; + let Constraints = "$r = $a"; +} + +//FP move operations +def MOV_S: RRR_Inst<0x00, 0x0A, 0x0f, (outs FPR:$r), (ins FPR:$s), + "mov.s\t$r, $s", + [(set FPR:$r, (Xtensa_movs FPR:$s))]>, Requires<[HasSingleFloat]> +{ + let t = 0x00; +} + +def CONST_S: RRR_Inst<0x00, 0x0a, 0x0f, (outs FPR:$r), (ins uimm4:$imm), + "const.s\t$r, $imm", []>, Requires<[HasSingleFloat]> +{ + bits<4> imm; + + let t = 0x03; + let s = imm{3-0}; +} + +def DIV0_S: RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), + "div0.s\t$r, $s", []>, Requires<[HasSingleFloat]> +{ + let t = 0x7; +} + +def MADDN_S: RRR_Inst<0x00, 0x0A, 0x06, (outs FPR:$r), (ins FPR:$s, FPR:$t), + "maddn.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]> +{ + let isCommutable 
= 0; +} + +def MKDADJ_S: RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), + "mkdadj.s\t$r, $s", []>, Requires<[HasSingleFloat]> +{ + let t = 0x0D; +} + +def MKSADJ_S: RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), + "mksadj.s\t$r, $s", []>, Requires<[HasSingleFloat]> +{ + let t = 0x0C; +} + +def ADDEXP_S: RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), + "addexp.s\t$r, $s", []>, Requires<[HasSingleFloat]> +{ + let t = 0x0E; +} + +def ADDEXPM_S: RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), + "addexpm.s\t$r, $s", []>, Requires<[HasSingleFloat]> +{ + let t = 0x0F; +} + +def DIVN_S: RRR_Inst<0x00, 0x0A, 0x07, (outs FPR:$r), (ins FPR:$s, FPR:$t), + "divn.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]> +{ +} + +def NEXP01_S: RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), + "nexp01.s\t$r, $s", []>, Requires<[HasSingleFloat]> +{ + let t = 0x0B; +} + +def SQRT0_S: RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), + "sqrt0.s\t$r, $s", []>, Requires<[HasSingleFloat]> +{ + let t = 0x09; +} + +//===----------------------------------------------------------------------===// +// Boolean branch Instructions +//===----------------------------------------------------------------------===// + +let isBranch = 1, isTerminator = 1, Predicates = [HasBoolean] in +{ + def BT: RRI8_Inst<0x06, (outs), (ins BR:$b, brtarget:$target), + "bt\t$b, $target", + [(Xtensa_brcc_t BR:$b, bb:$target)]> + { + bits<8> target; + bits<4> b; + + let r = 0x1; + let s = b; + let t = 0x7; + let imm8 = target; + } + + def BF: RRI8_Inst<0x06, (outs), (ins BR:$b, brtarget:$target), + "bf\t$b, $target", + [(Xtensa_brcc_f BR:$b, bb:$target)]> + { + bits<8> target; + bits<4> b; + + let r = 0x0; + let s = b; + let t = 0x7; + let imm8 = target; + } +} + + +//===----------------------------------------------------------------------===// +// FP select operations +let usesCustomInserter = 1 in +{ + def SELECT_CC_FP_INT: Pseudo<(outs AR:$dst), (ins FPR:$lhs, FPR:$rhs, AR:$t, AR:$f, i32imm:$cond), + "!select_cc_fp_int $dst, $lhs, $rhs, $t, $f, $cond", + [(set AR:$dst, (Xtensa_select_cc_fp FPR:$lhs, FPR:$rhs, AR:$t, AR:$f, imm:$cond))]>; + def SELECT_CC_INT_FP: Pseudo<(outs FPR:$dst), (ins AR:$lhs, AR:$rhs, FPR:$t, FPR:$f, i32imm:$cond), + "!select_cc_int_fp $dst, $lhs, $rhs, $t, $f, $cond", + [(set FPR:$dst, (Xtensa_select_cc_fp AR:$lhs, AR:$rhs, FPR:$t, FPR:$f, imm:$cond))]>; + def SELECT_CC_FP_FP: Pseudo<(outs FPR:$dst), (ins FPR:$lhs, FPR:$rhs, FPR:$t, FPR:$f, i32imm:$cond), + "!select_cc_fp_fp $dst, $lhs, $rhs, $t, $f, $cond", + [(set FPR:$dst, (Xtensa_select_cc_fp FPR:$lhs, FPR:$rhs, FPR:$t, FPR:$f, imm:$cond))]>; +} + +// Shift Pseudo instructions: +// SSL/SSR + Shift combination +let usesCustomInserter = 1 in +{ + def SLL_P: Pseudo<(outs AR:$r), (ins AR:$s, AR:$sa), + "# SLL_P $r, $s, $sa", + [(set AR:$r, (shl AR:$s, AR:$sa))]>; + + def SRA_P: Pseudo<(outs AR:$r), (ins AR:$t, AR:$sa), + "# SRA_P $r, $t, $sa", + [(set AR:$r, (sra AR:$t, AR:$sa))]>; + + def SRL_P: Pseudo<(outs AR:$r), (ins AR:$t, AR:$sa), + "# SRL_P $r, $t, $sa", + [(set AR:$r, (srl AR:$t, AR:$sa))]>; +} + +// Xtensa missed L8I load operation, use pseudo operation +let usesCustomInserter = 1 in +def L8I_P: Pseudo<(outs AR:$t), (ins mem8:$addr), + "!L8I_P $t, $addr", + [(set AR:$t, (sextloadi8 + addr_ish1:$addr))]>; + +def SEXT: RRR_Inst<0x00, 0x03, 0x02, (outs AR:$r), (ins AR:$s, seimm7_22:$imm), + "sext\t$r, $s, $imm", + []>,Requires<[HasSEXT]> +{ + bits<4> imm; + + let t = imm; +} + +// FrameIndexes are legalized when they 
are operands of load/store
+// instructions. The same does not happen for stack address copies, so an
+// add op with a mem ComplexPattern is used so that the stack address copy
+// can be matched.
+// Setting the mayLoad attribute is a trick to get the instruction operands
+// processed in XtensaRegisterInfo::eliminateFI.
+
+let isCodeGenOnly = 1, mayLoad = 1 in
+{
+
+  def LEA_ADD : RRI8_Inst<0x02, (outs AR:$t), (ins mem32:$addr),
+                          "addi\t$t, $addr",
+                          [(set AR:$t, addr_ish4:$addr)]>
+  {
+    bits<12> addr;
+
+    let r = 0x0C;
+    let imm8{7-0} = addr{11-4};
+    let s{3-0} = addr{3-0};
+  }
+}
+
+def MULL: ArithLogic_RRR<0x08, 0x02, "mull", mul, 1>, Requires<[HasMul32]>;
+def MULUH: ArithLogic_RRR<0x0A, 0x02, "muluh", mulhu, 1>, Requires<[HasMul32High]>;
+def MULSH: ArithLogic_RRR<0x0B, 0x02, "mulsh", mulhs, 1>, Requires<[HasMul32High]>;
+def QUOS: ArithLogic_RRR<0x0D, 0x02, "quos", sdiv>, Requires<[HasDiv32]>;
+def QUOU: ArithLogic_RRR<0x0C, 0x02, "quou", udiv>, Requires<[HasDiv32]>;
+def REMS: ArithLogic_RRR<0x0F, 0x02, "rems", srem>, Requires<[HasDiv32]>;
+def REMU: ArithLogic_RRR<0x0E, 0x02, "remu", urem>, Requires<[HasDiv32]>;
+
+let Predicates = [HasNSA] in {
+  def NSA : RRR_Inst<0x00, 0x00, 0x04, (outs AR:$t), (ins AR:$s),
+                     "nsa\t$t, $s",
+                     []>
+  {
+    let r = 0xE;
+  }
+
+  def NSAU : RRR_Inst<0x00, 0x00, 0x04, (outs AR:$t), (ins AR:$s),
+                      "nsau\t$t, $s",
+                      []>
+  {
+    let r = 0xF;
+  }
+}
+
+def : Pat<(brcond AR:$s, bb:$target),
+          (BNEZ AR:$s, bb:$target)>;
+
+// Fold branches that are always taken (e.g. br i1 true, label ...),
+// which show up at -O0.
+def : Pat<(brcond (i32 1), bb:$target),
+          (J bb:$target)>;
+
+let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1, Size = 3 in
+{
+  def BR_JT: Pseudo<(outs), (ins AR:$s, i32imm:$jt),
+                    "!br_jt_p, $s, $jt",
+                    [(Xtensa_brjt AR:$s, tjumptable:$jt)]>;
+}
+
+// Extended loads
+def : Pat<(i32 (extloadi1 addr_ish1:$addr)), (L8UI addr_ish1:$addr)>;
+def : Pat<(i32 (extloadi8 addr_ish1:$addr)), (L8UI addr_ish1:$addr)>;
+def : Pat<(i32 (extloadi16 addr_ish2:$addr)), (L16UI addr_ish2:$addr)>;
+
+// Shift instructions
+let Uses = [SAR] in
+{
+  def SLL: RRR_Inst<0x00, 0x01, 0x0A, (outs AR:$r), (ins AR:$s),
+                    "sll\t$r, $s",
+                    [(set AR:$r, (Xtensa_shl AR:$s))]>
+  {
+    let t = 0x00;
+  }
+
+  def SRA: RRR_Inst<0x00, 0x01, 0x0B, (outs AR:$r), (ins AR:$t),
+                    "sra\t$r, $t",
+                    [(set AR:$r, (Xtensa_sra AR:$t))]>
+  {
+    let s = 0x00;
+  }
+
+  def SRC: RRR_Inst<0x00, 0x01, 0x08, (outs AR:$r), (ins AR:$s, AR:$t),
+                    "src\t$r, $s, $t",
+                    [(set AR:$r, (Xtensa_src AR:$s, AR:$t))]>;
+
+  def SRL: RRR_Inst<0x00, 0x01, 0x09, (outs AR:$r), (ins AR:$t),
+                    "srl\t$r, $t",
+                    [(set AR:$r, (Xtensa_srl AR:$t))]>
+  {
+    let s = 0x00;
+  }
+}
+
+let Defs = [SAR] in
+{
+  def SSL: RRR_Inst<0x00, 0x00, 0x04, (outs), (ins AR:$s),
+                    "ssl\t$s",
+                    [(Xtensa_ssl AR:$s)]>
+  {
+    let r = 0x01;
+    let t = 0x00;
+  }
+
+  def SSR: RRR_Inst<0x00, 0x00, 0x04, (outs), (ins AR:$s),
+                    "ssr\t$s",
+                    [(Xtensa_ssr AR:$s)]>
+  {
+    let r = 0x00;
+    let t = 0x00;
+  }
+}
+
+let isBarrier = 1, isTerminator = 1 in
+{
+  def BREAK: RRR_Inst<0x00, 0x00, 0x00, (outs), (ins i32imm:$t, i32imm:$s),
+                      "break\t$t, $s", []>
+  {
+    let r = 0x04;
+  }
+}
+
+def: Pat<(trap), (BREAK (i32 1), (i32 15))>;
+
+// calls
+def : Pat<(Xtensa_call (i32 tglobaladdr:$dst)),
+          (CALL0 tglobaladdr:$dst)>;
+def : Pat<(Xtensa_callw (i32 tglobaladdr:$dst)),
+          (CALL8 tglobaladdr:$dst)>;
+def : Pat<(Xtensa_call (i32 texternalsym:$dst)),
+          (CALL0 texternalsym:$dst)>;
+def : Pat<(Xtensa_callw (i32 texternalsym:$dst)),
+          (CALL8 texternalsym:$dst)>;
+def : 
Pat<(Xtensa_call AR:$dst), + (CALLX0 AR:$dst)>; +def : Pat<(Xtensa_callw AR:$dst), + (CALLX8 AR:$dst)>; + +def RSIL: RRR_Inst<0x00, 0x00, 0x00, (outs AR:$t), (ins uimm4:$imm), + "rsil\t$t, $imm", []> +{ + bits<4> imm; + + let r = 0x6; + let s = imm{3-0}; +} + +def WAITI: RRR_Inst<0x00, 0x00, 0x00, (outs), (ins uimm4:$imm), + "waiti\t$imm", []> +{ + bits<4> imm; + + let r = 0x7; + let s = imm{3-0}; + let t = 0; +} + +def WDTLB: RRR_Inst<0x00, 0x00, 0x05, (outs AR:$t), (ins AR:$s), + "wdtlb\t$t, $s", []> +{ + let r = 0xE; +} + +def WITLB: RRR_Inst<0x00, 0x00, 0x05, (outs AR:$t), (ins AR:$s), + "witlb\t$t, $s", []> +{ + let r = 0x6; +} + +def RFI: RRR_Inst<0x00, 0x00, 0x00, (outs), (ins uimm4:$imm), + "rfi\t$imm", []> +{ + bits<4> imm; + + let r = 0x3; + let s = imm{3-0}; + let t = 0x1; +} + +let mayStore = 1, mayLoad = 1, usesCustomInserter = 1, Predicates = [HasS32C1I] in +{ + def S32C1I: RRI8_Inst<0x02, (outs AR:$a), (ins AR:$t, mem32:$addr), + "s32c1i\t$t, $addr", []> + { + bits<12> addr; + + let r = 0x0e; + let Uses = [SCOMPARE1]; + let Constraints = "$a = $t"; + let imm8{7-0} = addr{11-4}; + let s{3-0} = addr{3-0}; + } +} + +//===----------------------------------------------------------------------===// +// Atomic patterns +//===----------------------------------------------------------------------===// + +def : Pat<(i32 (atomic_load_8 addr_ish1:$addr)), (L8UI addr_ish1:$addr)>; +def : Pat<(i32 (atomic_load_16 addr_ish2:$addr)), (L16UI addr_ish2:$addr)>; +def : Pat<(i32 (atomic_load_32 addr_ish4:$addr)), (L32I addr_ish4:$addr)>; + +def : Pat<(atomic_store_8 addr_ish1:$addr, AR:$t), (S8I AR:$t, addr_ish1:$addr)>; +def : Pat<(atomic_store_16 addr_ish2:$addr, AR:$t), (S16I AR:$t, addr_ish2:$addr)>; +def : Pat<(atomic_store_32 addr_ish4:$addr, AR:$t), (S32I AR:$t, addr_ish4:$addr)>; + +let usesCustomInserter = 1, Predicates = [HasS32C1I] in +{ + def ATOMIC_CMP_SWAP_8_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$cmp, AR:$swap), + "!atomic_cmp_swap_8_p, $dst, $ptr, $cmp, $swap", + [(set AR:$dst, (atomic_cmp_swap_8 AR:$ptr, AR:$cmp, AR:$swap))]>; + def ATOMIC_CMP_SWAP_16_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$cmp, AR:$swap), + "!atomic_cmp_swap_16_p, $dst, $ptr, $cmp, $swap", + [(set AR:$dst, (atomic_cmp_swap_16 AR:$ptr, AR:$cmp, AR:$swap))]>; + def ATOMIC_CMP_SWAP_32_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$cmp, AR:$swap), + "!atomic_cmp_swap_32_p, $dst, $ptr, $cmp, $swap", + [(set AR:$dst, (atomic_cmp_swap_32 AR:$ptr, AR:$cmp, AR:$swap))]>; + + def ATOMIC_LOAD_ADD_8_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_add_8_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_add_8 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_ADD_16_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_add_16_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_add_16 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_ADD_32_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_add_32_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_add_32 AR:$ptr, AR:$arg))]>; + + def ATOMIC_LOAD_SUB_8_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_sub_8_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_sub_8 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_SUB_16_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_sub_16_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_sub_16 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_SUB_32_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_sub_32_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_sub_32 AR:$ptr, 
AR:$arg))]>; + + def ATOMIC_LOAD_AND_8_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_and_8_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_and_8 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_AND_16_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_and_16_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_and_16 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_AND_32_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_and_32_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_and_32 AR:$ptr, AR:$arg))]>; + + def ATOMIC_LOAD_OR_8_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_or_8_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_or_8 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_OR_16_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_or_16_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_or_16 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_OR_32_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_or_32_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_or_32 AR:$ptr, AR:$arg))]>; + + def ATOMIC_LOAD_XOR_8_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_xor_8_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_xor_8 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_XOR_16_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_xor_16_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_xor_16 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_XOR_32_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_xor_32_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_xor_32 AR:$ptr, AR:$arg))]>; + + def ATOMIC_LOAD_NAND_8_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_nand_8_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_nand_8 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_NAND_16_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_nand_16_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_nand_16 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_NAND_32_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_nand_32_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_nand_32 AR:$ptr, AR:$arg))]>; + + def ATOMIC_LOAD_MIN_8_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_min_8_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_min_8 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_MIN_16_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_min_16_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_min_16 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_MIN_32_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_min_32_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_min_32 AR:$ptr, AR:$arg))]>; + + def ATOMIC_LOAD_MAX_8_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_max_8_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_max_8 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_MAX_16_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_max_16_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_max_16 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_MAX_32_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_max_32_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_max_32 AR:$ptr, AR:$arg))]>; + + def ATOMIC_LOAD_UMIN_8_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_umin_8_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_umin_8 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_UMIN_16_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_umin_16_p, $dst, $ptr, $arg", + [(set AR:$dst, 
(atomic_load_umin_16 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_UMIN_32_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_umin_32_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_umin_32 AR:$ptr, AR:$arg))]>; + + def ATOMIC_LOAD_UMAX_8_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_umax_8_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_umax_8 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_UMAX_16_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_umax_16_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_umax_16 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_UMAX_32_P: Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_umax_32_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_umax_32 AR:$ptr, AR:$arg))]>; +} + diff --git a/llvm/lib/Target/Xtensa/XtensaMCInstLower.cpp b/llvm/lib/Target/Xtensa/XtensaMCInstLower.cpp new file mode 100644 index 0000000000000..f5182e7d467a9 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaMCInstLower.cpp @@ -0,0 +1,140 @@ +//===- XtensaMCInstLower.cpp - Convert Xtensa MachineInstr to MCInst ------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains code to lower Xtensa MachineInstrs to their corresponding +// MCInst records. +// +//===----------------------------------------------------------------------===// + +#include "XtensaMCInstLower.h" +#include "MCTargetDesc/XtensaBaseInfo.h" +#include "MCTargetDesc/XtensaMCExpr.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/IR/Mangler.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCStreamer.h" + +using namespace llvm; + +XtensaMCInstLower::XtensaMCInstLower(MCContext &ctx, + XtensaAsmPrinter &asmPrinter) + : Ctx(ctx), Printer(asmPrinter) {} + +MCSymbol * +XtensaMCInstLower::GetExternalSymbolSymbol(const MachineOperand &MO) const { + return Printer.GetExternalSymbolSymbol(MO.getSymbolName()); +} + +MCSymbol * +XtensaMCInstLower::GetJumpTableSymbol(const MachineOperand &MO) const { + return Printer.GetJTISymbol(MO.getIndex()); +} + +MCSymbol * +XtensaMCInstLower::GetConstantPoolIndexSymbol(const MachineOperand &MO) const { + // Create a symbol for the name. 
+ return Printer.GetCPISymbol(MO.getIndex()); +} + +MCSymbol * +XtensaMCInstLower::GetBlockAddressSymbol(const MachineOperand &MO) const { + return Printer.GetBlockAddressSymbol(MO.getBlockAddress()); +} + +MCOperand +XtensaMCInstLower::LowerSymbolOperand(const MachineOperand &MO, + MachineOperand::MachineOperandType MOTy, + unsigned Offset) const { + const MCSymbol *Symbol; + XtensaMCExpr::VariantKind Kind = XtensaMCExpr::VK_Xtensa_None; + + switch (MOTy) { + case MachineOperand::MO_MachineBasicBlock: + Symbol = MO.getMBB()->getSymbol(); + break; + case MachineOperand::MO_GlobalAddress: + Symbol = Printer.getSymbol(MO.getGlobal()); + Offset += MO.getOffset(); + break; + case MachineOperand::MO_BlockAddress: + Symbol = Printer.GetBlockAddressSymbol(MO.getBlockAddress()); + Offset += MO.getOffset(); + break; + case MachineOperand::MO_ExternalSymbol: + Symbol = GetExternalSymbolSymbol(MO); + Offset += MO.getOffset(); + break; + case MachineOperand::MO_JumpTableIndex: + Symbol = GetJumpTableSymbol(MO); + break; + case MachineOperand::MO_ConstantPoolIndex: + Symbol = GetConstantPoolIndexSymbol(MO); + Offset += MO.getOffset(); + break; + default: + llvm_unreachable(""); + } + + const MCExpr *ME = + MCSymbolRefExpr::create(Symbol, MCSymbolRefExpr::VK_None, Ctx); + + ME = XtensaMCExpr::create(ME, Kind, Ctx); + + if (Offset) { + // Assume offset is never negative. + assert(Offset > 0); + + const MCConstantExpr *OffsetExpr = MCConstantExpr::create(Offset, Ctx); + ME = MCBinaryExpr::createAdd(ME, OffsetExpr, Ctx); + } + + return MCOperand::createExpr(ME); +} + +MCOperand XtensaMCInstLower::lowerOperand(const MachineOperand &MO, + unsigned Offset) const { + MachineOperand::MachineOperandType MOTy = MO.getType(); + + switch (MOTy) { + case MachineOperand::MO_Register: + // Ignore all implicit register operands. + if (MO.isImplicit()) + break; + return MCOperand::createReg(MO.getReg()); + case MachineOperand::MO_Immediate: + return MCOperand::createImm(MO.getImm() + Offset); + case MachineOperand::MO_MachineBasicBlock: + case MachineOperand::MO_GlobalAddress: + case MachineOperand::MO_ExternalSymbol: + case MachineOperand::MO_JumpTableIndex: + case MachineOperand::MO_ConstantPoolIndex: + case MachineOperand::MO_BlockAddress: + return LowerSymbolOperand(MO, MOTy, Offset); + case MachineOperand::MO_RegisterMask: + break; + default: + llvm_unreachable("unknown operand type"); + } + + return MCOperand(); +} + +void XtensaMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const { + OutMI.setOpcode(MI->getOpcode()); + + for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { + const MachineOperand &MO = MI->getOperand(i); + MCOperand MCOp = lowerOperand(MO); + + if (MCOp.isValid()) + OutMI.addOperand(MCOp); + } +} diff --git a/llvm/lib/Target/Xtensa/XtensaMCInstLower.h b/llvm/lib/Target/Xtensa/XtensaMCInstLower.h new file mode 100644 index 0000000000000..bbcc2816463da --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaMCInstLower.h @@ -0,0 +1,59 @@ +//===- XtensaMCInstLower.h - Lower MachineInstr to MCInst -----*- C++ -*--===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===---------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_XTENSA_XTENSAMCINSTLOWER_H +#define LLVM_LIB_TARGET_XTENSA_XTENSAMCINSTLOWER_H + +#include "XtensaAsmPrinter.h" +#include "llvm/CodeGen/MachineOperand.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/DataTypes.h" + +namespace llvm { +class MCContext; +class MCInst; +class MCOperand; +class MCSymbol; +class MachineInstr; +class MachineOperand; +class XtensaAsmPrinter; + +class LLVM_LIBRARY_VISIBILITY XtensaMCInstLower { + MCContext &Ctx; + XtensaAsmPrinter &Printer; + +public: + XtensaMCInstLower(MCContext &ctx, XtensaAsmPrinter &asmPrinter); + + // Lower MachineInstr MI to MCInst OutMI. + void lower(const MachineInstr *MI, MCInst &OutMI) const; + + // Return an MCOperand for MO. Return an empty operand if MO is implicit. + MCOperand lowerOperand(const MachineOperand &MO, unsigned Offset = 0) const; + + // Return an MCOperand for MO, given that it equals Symbol + Offset. + MCOperand lowerSymbolOperand(const MachineOperand &MO, const MCSymbol *Symbol, + int64_t Offset) const; + +private: + MCSymbol *GetExternalSymbolSymbol(const MachineOperand &MO) const; + + MCSymbol *GetJumpTableSymbol(const MachineOperand &MO) const; + + MCSymbol *GetConstantPoolIndexSymbol(const MachineOperand &MO) const; + + MCSymbol *GetBlockAddressSymbol(const MachineOperand &MO) const; + + MCOperand LowerSymbolOperand(const MachineOperand &MO, + MachineOperand::MachineOperandType MOTy, + unsigned Offset) const; +}; +} // end namespace llvm + +#endif /* LLVM_LIB_TARGET_XTENSA_XTENSAMCINSTLOWER_H */ diff --git a/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.cpp b/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.cpp new file mode 100644 index 0000000000000..568d025cf924f --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.cpp @@ -0,0 +1,18 @@ +//===- XtensaMachineFunctionInfo.cpp - Private data used for Xtensa -------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "XtensaMachineFunctionInfo.h" +//#include "MCTargetDesc/XtensaBaseInfo.h" +#include "XtensaInstrInfo.h" +#include "XtensaSubtarget.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/IR/Function.h" + +using namespace llvm; diff --git a/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.h b/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.h new file mode 100644 index 0000000000000..04caa941cacbe --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.h @@ -0,0 +1,55 @@ +//==- XtensaMachineFunctionInfo.h - Xtensa machine function info -*- C++ -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===---------------------------------------------------------------------===// +// +// This file declares Xtensa-specific per-machine-function information. 
+// +//===---------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_XTENSA_XTENSAMACHINEFUNCTIONINFO_H +#define LLVM_LIB_TARGET_XTENSA_XTENSAMACHINEFUNCTIONINFO_H + +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/Target/TargetMachine.h" + +namespace llvm { + +class XtensaFunctionInfo : public MachineFunctionInfo { + MachineFunction &MF; + + unsigned VarArgsFirstGPR; + int VarArgsStackOffset; + unsigned VarArgsFrameIndex; + +public: + explicit XtensaFunctionInfo(MachineFunction &MF) + : MF(MF), VarArgsFirstGPR(0), VarArgsStackOffset(0), + VarArgsFrameIndex(0) { + MF.setAlignment(Align(4)); + } + + unsigned getVarArgsFirstGPR() const { return VarArgsFirstGPR; } + void setVarArgsFirstGPR(unsigned GPR) { VarArgsFirstGPR = GPR; } + + int getVarArgsStackOffset() const { return VarArgsStackOffset; } + void setVarArgsStackOffset(int Offset) { VarArgsStackOffset = Offset; } + + // Get and set the frame index of the first stack vararg. + unsigned getVarArgsFrameIndex() const { return VarArgsFrameIndex; } + void setVarArgsFrameIndex(unsigned FI) { VarArgsFrameIndex = FI; } + + // TODO: large frame size definition should be specified more precisely + bool isLargeFrame() { + return (MF.getFrameInfo().estimateStackSize(MF) > 512) ? true : false; + } +}; + +} // namespace llvm + +#endif /* LLVM_LIB_TARGET_XTENSA_XTENSAMACHINEFUNCTIONINFO_H */ diff --git a/llvm/lib/Target/Xtensa/XtensaOperands.td b/llvm/lib/Target/Xtensa/XtensaOperands.td new file mode 100644 index 0000000000000..b99bd584e5e48 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaOperands.td @@ -0,0 +1,256 @@ +//===- XtensaOperands.td - Xtensa instruction operands ----*- tblgen-*--===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===-------------------------------------------------------------------===// + +// Immediate operands with a shared generic render method. 
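+// Each operand below pairs an AsmOperandClass (for assembly parsing) with an
+// ImmLeaf predicate (for instruction selection). For example, imm1n_15 is the
+// ADDI.N immediate, imm32n_95 the MOVI.N immediate, and uimm5 the bit index
+// used by BBCI/BBSI.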
+class ImmAsmOperand<string name> : AsmOperandClass
+{
+  let Name = name;
+  let RenderMethod = "addImmOperands";
+  let DiagnosticType = !strconcat("Invalid", name);
+}
+
+class Immediate<ValueType vt, code pred, string asmop>
+  : Operand<vt>, ImmLeaf<vt, pred>
+{
+  let PrintMethod = "print"##asmop;
+  let ParserMatchClass = !cast<AsmOperandClass>(asmop);
+}
+
+// imm8 predicate - Immediate in the range [-128,127]
+def Imm8_AsmOperand: ImmAsmOperand<"Imm8">;
+def imm8: Immediate<i32, [{ return Imm >= -128 && Imm <= 127; }], "Imm8_AsmOperand"> {
+  let EncoderMethod = "getImm8OpValue";
+  let DecoderMethod = "decodeImm8Operand";
+}
+
+// imm8n_7 predicate - Immediate in the range [-8,7]
+def Imm8n_7_AsmOperand: ImmAsmOperand<"Imm8n_7">;
+def imm8n_7: Immediate<i32, [{ return Imm >= -8 && Imm <= 7; }], "Imm8n_7_AsmOperand"> {
+  let EncoderMethod = "getImm8n_7OpValue";
+  let DecoderMethod = "decodeImm8n_7Operand";
+}
+
+// imm64n_4n predicate - Immediate in the range [-64,-4]
+def Imm64n_4n_AsmOperand: ImmAsmOperand<"Imm64n_4n">;
+def imm64n_4n: Immediate<i32, [{ return Imm >= -64 && Imm <= -4; }], "Imm64n_4n_AsmOperand"> {
+  let EncoderMethod = "getImm64n_4nOpValue";
+  let DecoderMethod = "decodeImm64n_4nOperand";
+}
+
+// imm8_sh8 predicate - Immediate in the range [-32768,32512] with (bits[7-0] == 0)
+// imm8 value left shifted by 8 bits
+def Imm8_sh8_AsmOperand: ImmAsmOperand<"Imm8_sh8">;
+def imm8_sh8: Immediate<i32, [{ return Imm >= -32768 && Imm <= 32512 && ((Imm & 0xFF) == 0); }], "Imm8_sh8_AsmOperand"> {
+  let EncoderMethod = "getImm8_sh8OpValue";
+  let DecoderMethod = "decodeImm8_sh8Operand";
+}
+
+// imm12 predicate - Immediate in the range [-2048,2047]
+def Imm12_AsmOperand: ImmAsmOperand<"Imm12">;
+def imm12: Immediate<i32, [{ return Imm >= -2048 && Imm <= 2047; }], "Imm12_AsmOperand"> {
+  let EncoderMethod = "getImm12OpValue";
+  let DecoderMethod = "decodeImm12Operand";
+}
+
+// imm12m predicate - Immediate for MOV operation
+def Imm12m_AsmOperand: ImmAsmOperand<"Imm12m">;
+def imm12m: Immediate<i32, [{ return Imm >= -2048 && Imm <= 2047; }], "Imm12m_AsmOperand"> {
+  let EncoderMethod = "getImm12OpValue";
+  let DecoderMethod = "decodeImm12Operand";
+}
+
+// uimm4 predicate - Immediate in the range [0,15]
+def Uimm4_AsmOperand: ImmAsmOperand<"Uimm4">;
+def uimm4: Immediate<i32, [{ return Imm >= 0 && Imm <= 15; }], "Uimm4_AsmOperand"> {
+  let EncoderMethod = "getUimm4OpValue";
+  let DecoderMethod = "decodeUimm4Operand";
+}
+
+// uimm5 predicate - Immediate in the range [0,31]
+def Uimm5_AsmOperand: ImmAsmOperand<"Uimm5">;
+def uimm5: Immediate<i32, [{ return Imm >= 0 && Imm <= 31; }], "Uimm5_AsmOperand"> {
+  let EncoderMethod = "getUimm5OpValue";
+  let DecoderMethod = "decodeUimm5Operand";
+}
+
+// imm1_16 predicate - Immediate in the range [1,16]
+def Imm1_16_AsmOperand: ImmAsmOperand<"Imm1_16">;
+def imm1_16: Immediate<i32, [{ return Imm >= 1 && Imm <= 16; }], "Imm1_16_AsmOperand"> {
+  let EncoderMethod = "getImm1_16OpValue";
+  let DecoderMethod = "decodeImm1_16Operand";
+}
+
+// imm1n_15 predicate - Immediate in the range [-1,15], except 0
+def Imm1n_15_AsmOperand: ImmAsmOperand<"Imm1n_15">;
+def imm1n_15: Immediate<i32, [{ return Imm >= -1 && Imm <= 15 && Imm != 0; }], "Imm1n_15_AsmOperand"> {
+  let EncoderMethod = "getImm1n_15OpValue";
+  let DecoderMethod = "decodeImm1n_15Operand";
+}
+
+// imm32n_95 predicate - Immediate in the range [-32,95]
+def Imm32n_95_AsmOperand: ImmAsmOperand<"Imm32n_95">;
+def imm32n_95: Immediate<i32, [{ return Imm >= -32 && Imm <= 95; }], "Imm32n_95_AsmOperand"> {
+  let EncoderMethod = "getImm32n_95OpValue";
+  let DecoderMethod = "decodeImm32n_95Operand";
+}
+
+// shimm1_31 predicate - Immediate in the range [1,31]
+def Shimm1_31_AsmOperand: ImmAsmOperand<"Shimm1_31">;
+def shimm1_31: Immediate<i32, [{ return Imm >= 1 && Imm <= 31; }], "Shimm1_31_AsmOperand"> {
+  let EncoderMethod
= "getShimm1_31OpValue"; + let DecoderMethod = "decodeShimm1_31Operand"; +} + +// Memory offset 0..255 for 8-bit memory accesses +def Offset8m8_AsmOperand: ImmAsmOperand<"Offset8m8">; +def offset8m8: Immediate= 0 && Imm <= 255; }], + "Offset8m8_AsmOperand">; + +// Memory offset 0..510 for 16-bit memory accesses +def Offset8m16_AsmOperand: ImmAsmOperand<"Offset8m16">; +def offset8m16: Immediate= 0 && Imm <= 510 && (Imm & 0x1 == 0); }], + "Offset8m16_AsmOperand">; + +// Memory offset 0..1020 for 32-bit memory accesses +def Offset8m32_AsmOperand: ImmAsmOperand<"Offset8m32">; +def offset8m32: Immediate= 0 && Imm <= 1020 && (Imm & 0x3 == 0); }], + "Offset8m32_AsmOperand">; + +// Memory offset 0..60 for 32-bit memory accesses +def Offset4m32_AsmOperand: ImmAsmOperand<"Offset4m32">; +def offset4m32: Immediate= 0 && Imm <= 60 && (Imm & 0x3 == 0); }], + "Offset4m32_AsmOperand">; + +// entry_imm12 predicate - Immediate in the range [0,32760], ENTRY parameter +def Entry_Imm12_AsmOperand: ImmAsmOperand<"entry_imm12">; +def entry_imm12: Immediate= 0 && Imm <= 32760 && (Imm & 0x3 == 0); }], "Entry_Imm12_AsmOperand"> { + let EncoderMethod = "getEntry_Imm12OpValue"; +} + +// b4const predicate - Branch Immediate 4-bit signed operand +def B4const_AsmOperand: ImmAsmOperand<"B4const">; +def b4const: Immediate { + let EncoderMethod = "getB4constOpValue"; + let DecoderMethod = "decodeB4constOperand"; +} + +// b4constu predicate - Branch Immediate 4-bit unsigned operand +def B4constu_AsmOperand: ImmAsmOperand<"B4constu">; +def b4constu: Immediate { + let EncoderMethod = "getB4constuOpValue"; + let DecoderMethod = "decodeB4constuOperand"; +} +//===----------------------------------------------------------------------===// +// Memory address operands +//===----------------------------------------------------------------------===// + +class mem : Operand +{ + let MIOperandInfo = (ops AR, offset); + let EncoderMethod = "getMemRegEncoding"; + let OperandType = "OPERAND_MEMORY"; + let PrintMethod = "printMemOperand"; +} + +def mem8: mem +{ + let DecoderMethod = "decodeMem8Operand"; +} + +def mem16: mem +{ + let DecoderMethod = "decodeMem16Operand"; +} + +def mem32: mem +{ + let DecoderMethod = "decodeMem32Operand"; +} + +def mem32n: mem +{ + let DecoderMethod = "decodeMem32nOperand"; +} + +//Add patterns for future use in stack addressing mode +def addr_ish1: ComplexPattern; +def addr_ish2: ComplexPattern; +def addr_ish4: ComplexPattern; + +//===----------------------------------------------------------------------===// +// Symbolic address operands +//===----------------------------------------------------------------------===// +def XtensaPCRelTargetAsmOperand : AsmOperandClass { + let Name = "PCRelTarget"; + let ParserMethod = "parsePCRelTarget"; + let PredicateMethod = "isImm"; + let RenderMethod = "addImmOperands"; +} + +def pcrel32call: Operand +{ + let PrintMethod = "printCallOperand"; + let EncoderMethod = "getCallEncoding"; + let DecoderMethod = "decodeCallOperand"; + let ParserMatchClass = XtensaPCRelTargetAsmOperand; +} + +def brtarget : Operand +{ + let PrintMethod = "printBranchTarget"; + let EncoderMethod = "getBranchTargetEncoding"; + let DecoderMethod = "decodeBranchOperand"; + let ParserMatchClass = XtensaPCRelTargetAsmOperand; +} + +def jumptarget: Operand +{ + let PrintMethod = "printJumpTarget"; + let EncoderMethod = "getJumpTargetEncoding"; + let DecoderMethod = "decodeJumpOperand"; + let ParserMatchClass = XtensaPCRelTargetAsmOperand; +} + +def L32Rtarget: Operand +{ + let PrintMethod = 
"printL32RTarget"; + let EncoderMethod = "getL32RTargetEncoding"; + let DecoderMethod = "decodeL32ROperand"; + let ParserMatchClass = XtensaPCRelTargetAsmOperand; +} + + +//===----------------------------------------------------------------------===// +// seimm7_22 predicate - Immediate in the range [7,22] for sign extend +def Seimm7_22_AsmOperand: ImmAsmOperand<"seimm7_22">; +def seimm7_22: Immediate= 7 && Imm <= 22; }], "Seimm7_22_AsmOperand"> { + let EncoderMethod = "getShimmSeimm7_22OpValue"; +} \ No newline at end of file diff --git a/llvm/lib/Target/Xtensa/XtensaOperators.td b/llvm/lib/Target/Xtensa/XtensaOperators.td new file mode 100644 index 0000000000000..ddfec97fce82a --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaOperators.td @@ -0,0 +1,111 @@ +//===- XtensaOperators.td - Xtensa-specific operators ------*- tblgen-*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===-------------------------------------------------------------------===// + +//===-------------------------------------------------------------------===// +// Type profiles +//===-------------------------------------------------------------------===// + +def SDT_XtensaCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>; +def SDT_XtensaCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>; +def SDT_XtensaCall : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>; +def SDT_XtensaWrapPtr : SDTypeProfile<1, 1, + [SDTCisSameAs<0, 1>, + SDTCisPtrTy<0>]>; +def SDT_XtensaSelectCC : SDTypeProfile<1, 5, + [SDTCisSameAs<0, 1>, + SDTCisSameAs<2, 3>, + SDTCisVT<5, i32>]>; +def SDT_XtensaMOVSP : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisVT<0, i32>]>; +def SDT_XtensaBrCC : SDTypeProfile<0, 2, [SDTCisVT<0, i1>, SDTCisVT<1, OtherVT>]>; +def SDT_XtensaCmp : SDTypeProfile<1, 2, [SDTCisVT<0, i1>, SDTCisVT<1, f32>, SDTCisVT<2, f32>]>; +def SDT_XtensaMADD : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, SDTCisVT<0, f32>]>; +def SDT_XtensaMOVS : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisVT<0, f32>]>; +def SDT_XtensaSelectCCFP : SDTypeProfile<1, 5, [SDTCisSameAs<0, 3>, SDTCisSameAs<1, 2>, SDTCisSameAs<3, 4>, SDTCisVT<5, i32>]>; +def SDT_XtensaBrJT : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i32>]>; +def SDT_XtensaSHL : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i32>]>; +def SDT_XtensaSRA : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i32>]>; +def SDT_XtensaSRL : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i32>]>; +def SDT_XtensaSRC : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisVT<1, i32>, SDTCisVT<2, i32>]>; +def SDT_XtensaSSL : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>; +def SDT_XtensaSSR : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>; +def SDT_XtensaMEMBARRIER : SDTypeProfile<0, 0, []>; +def SDT_XtensaRUR : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i32>]>; + +//===-------------------------------------------------------------------===// +// Node definitions +//===-------------------------------------------------------------------===// + +def Xtensa_call: SDNode<"XtensaISD::CALL", SDT_XtensaCall, + [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue, SDNPVariadic]>; + +def Xtensa_retflag: SDNode<"XtensaISD::RET_FLAG", SDTNone, + [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; +def Xtensa_retWflag: SDNode<"XtensaISD::RETW_FLAG", SDTNone, + [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; + +def Xtensa_callseq_start: SDNode<"ISD::CALLSEQ_START", 
SDT_XtensaCallSeqStart, + [SDNPHasChain, SDNPSideEffect, SDNPOutGlue]>; +def Xtensa_callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_XtensaCallSeqEnd, + [SDNPHasChain, SDNPSideEffect, SDNPOptInGlue, + SDNPOutGlue]>; + +def Xtensa_pcrel_wrapper: SDNode<"XtensaISD::PCREL_WRAPPER", SDT_XtensaWrapPtr, []>; + +def Xtensa_select : SDNode<"XtensaISD::SELECT", SDTSelect>; +def Xtensa_select_cc: SDNode<"XtensaISD::SELECT_CC", SDT_XtensaSelectCC, + [SDNPInGlue]>; + +def Xtensa_movsp: SDNode<"XtensaISD::MOVSP", SDT_XtensaMOVSP, + [SDNPInGlue]>; + +def Xtensa_brcc_t : SDNode<"XtensaISD::BR_CC_T", SDT_XtensaBrCC, + [SDNPHasChain, SDNPInGlue]>; +def Xtensa_brcc_f : SDNode<"XtensaISD::BR_CC_F", SDT_XtensaBrCC, + [SDNPHasChain, SDNPInGlue]>; + +def Xtensa_cmpoeq : SDNode<"XtensaISD::CMPOEQ", SDT_XtensaCmp, [SDNPOutGlue]>; +def Xtensa_cmpolt : SDNode<"XtensaISD::CMPOLT", SDT_XtensaCmp, [SDNPOutGlue]>; +def Xtensa_cmpole : SDNode<"XtensaISD::CMPOLE", SDT_XtensaCmp, [SDNPOutGlue]>; +def Xtensa_cmpueq : SDNode<"XtensaISD::CMPUEQ", SDT_XtensaCmp, [SDNPOutGlue]>; +def Xtensa_cmpult : SDNode<"XtensaISD::CMPULT", SDT_XtensaCmp, [SDNPOutGlue]>; +def Xtensa_cmpule : SDNode<"XtensaISD::CMPULE", SDT_XtensaCmp, [SDNPOutGlue]>; +def Xtensa_cmpuo : SDNode<"XtensaISD::CMPUO", SDT_XtensaCmp, [SDNPOutGlue]>; + +def Xtensa_madd: SDNode<"XtensaISD::MADD", SDT_XtensaMADD, + [SDNPInGlue]>; +def Xtensa_msub: SDNode<"XtensaISD::MSUB", SDT_XtensaMADD, + [SDNPInGlue]>; +def Xtensa_movs: SDNode<"XtensaISD::MOVS", SDT_XtensaMOVS, + [SDNPInGlue]>; + +def Xtensa_shl: SDNode<"XtensaISD::SHL", SDT_XtensaSHL, + [SDNPInGlue]>; +def Xtensa_sra: SDNode<"XtensaISD::SRA", SDT_XtensaSRA, + [SDNPInGlue]>; +def Xtensa_srl: SDNode<"XtensaISD::SRL", SDT_XtensaSRL, + [SDNPInGlue]>; +def Xtensa_src: SDNode<"XtensaISD::SRC", SDT_XtensaSRC, + [SDNPInGlue]>; +def Xtensa_ssl: SDNode<"XtensaISD::SSL", SDT_XtensaSSL, + [SDNPOutGlue]>; +def Xtensa_ssr: SDNode<"XtensaISD::SSR", SDT_XtensaSSR, + [SDNPOutGlue]>; +def Xtensa_select_cc_fp: SDNode<"XtensaISD::SELECT_CC_FP", SDT_XtensaSelectCCFP, + [SDNPInGlue]>; + +def Xtensa_brjt: SDNode<"XtensaISD::BR_JT", SDT_XtensaBrJT, + [SDNPHasChain]>; +def Xtensa_callw: SDNode<"XtensaISD::CALLW", SDT_XtensaCall, + [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue, SDNPVariadic]>; +def Xtensa_mem_barrier: SDNode<"XtensaISD::MEMW", SDT_XtensaMEMBARRIER, + [SDNPHasChain, SDNPSideEffect]>; + +def Xtensa_rur: SDNode<"XtensaISD::RUR", SDT_XtensaRUR, + [SDNPInGlue]>; + diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp new file mode 100644 index 0000000000000..ae12ca9560b36 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp @@ -0,0 +1,188 @@ +//===- XtensaRegisterInfo.cpp - Xtensa Register Information ---------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the Xtensa implementation of the TargetRegisterInfo class. 
+// +//===----------------------------------------------------------------------===// + +#include "XtensaRegisterInfo.h" +#include "XtensaInstrInfo.h" +#include "XtensaSubtarget.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" + +#define DEBUG_TYPE "xtensa-reg-info" + +#define GET_REGINFO_TARGET_DESC +#include "XtensaGenRegisterInfo.inc" + +using namespace llvm; + +// WinABI callee save list - empty +static const MCPhysReg CSRWE_Xtensa_SaveList[] = {0}; + +// WinABI call preserved mask - empty +static const uint32_t CSRWE_Xtensa_RegMask[] = {0}; + +XtensaRegisterInfo::XtensaRegisterInfo(const XtensaSubtarget &STI) + : XtensaGenRegisterInfo(Xtensa::A0), Subtarget(STI) {} + +const uint16_t * +XtensaRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { + if (Subtarget.isWinABI()) + return CSRWE_Xtensa_SaveList; + else + return CSR_Xtensa_SaveList; +} + +const uint32_t * +XtensaRegisterInfo::getCallPreservedMask(const MachineFunction &MF, + CallingConv::ID) const { + if (Subtarget.isWinABI()) + return CSRWE_Xtensa_RegMask; + else + return CSR_Xtensa_RegMask; +} + +BitVector XtensaRegisterInfo::getReservedRegs(const MachineFunction &MF) const { + BitVector Reserved(getNumRegs()); + const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); + + Reserved.set(Xtensa::A0); + if (TFI->hasFP(MF)) { + // fp is the frame pointer. Reserve all aliases. + Reserved.set(Xtensa::A15); + } + + // sp is the stack pointer. Reserve all aliases. + Reserved.set(Xtensa::SP); + Reserved.set(Xtensa::A7); + return Reserved; +} + +void XtensaRegisterInfo::eliminateFI(MachineBasicBlock::iterator II, + unsigned OpNo, int FrameIndex, + uint64_t StackSize, + int64_t SPOffset) const { + MachineInstr &MI = *II; + MachineFunction &MF = *MI.getParent()->getParent(); + MachineFrameInfo &MFI = MF.getFrameInfo(); + + const std::vector &CSI = MFI.getCalleeSavedInfo(); + int MinCSFI = 0; + int MaxCSFI = -1; + + if (CSI.size()) { + MinCSFI = CSI[0].getFrameIdx(); + MaxCSFI = CSI[CSI.size() - 1].getFrameIdx(); + } + + // The following stack frame objects are always referenced relative to $sp: + // 1. Outgoing arguments. + // 2. Pointer to dynamically allocated stack space. + // 3. Locations for callee-saved registers. + // 4. Locations for eh data registers. + // Everything else is referenced relative to whatever register + // getFrameRegister() returns. + unsigned FrameReg; + + if ((FrameIndex >= MinCSFI && FrameIndex <= MaxCSFI)) + FrameReg = Xtensa::SP; + else + FrameReg = getFrameRegister(MF); + + // Calculate final offset. + // - There is no need to change the offset if the frame object is one of the + // following: an outgoing argument, pointer to a dynamically allocated + // stack space or a $gp restore location, + // - If the frame object is any of the following, its offset must be adjusted + // by adding the size of the stack: + // incoming argument, callee-saved register location or local variable. 
+ bool IsKill = false; + int64_t Offset; + + Offset = SPOffset + (int64_t)StackSize; + Offset += MI.getOperand(OpNo + 1).getImm(); + + LLVM_DEBUG(errs() << "Offset : " << Offset << "\n" + << "<--------->\n"); + + bool Valid = false; + switch (MI.getOpcode()) { + case Xtensa::L8I_P: + case Xtensa::L8UI: + case Xtensa::S8I: + Valid = (Offset >= 0 && Offset <= 255); + break; + case Xtensa::L16SI: + case Xtensa::L16UI: + case Xtensa::S16I: + Valid = (Offset >= 0 && Offset <= 510); + break; + case Xtensa::LEA_ADD: + Valid = (Offset >= -128 && Offset <= 127); + break; + default: + Valid = (Offset >= 0 && Offset <= 1020); + break; + } + + // If MI is not a debug value, make sure Offset fits in the 16-bit immediate + // field. + if (!MI.isDebugValue() && !Valid) { + MachineBasicBlock &MBB = *MI.getParent(); + DebugLoc DL = II->getDebugLoc(); + unsigned ADD = Xtensa::ADD; + unsigned Reg; + const XtensaInstrInfo &TII = *static_cast( + MBB.getParent()->getSubtarget().getInstrInfo()); + + TII.loadImmediate(MBB, II, &Reg, Offset); + BuildMI(MBB, II, DL, TII.get(ADD), Reg) + .addReg(FrameReg) + .addReg(Reg, RegState::Kill); + + FrameReg = Reg; + Offset = 0; + IsKill = true; + } + + MI.getOperand(OpNo).ChangeToRegister(FrameReg, false, false, IsKill); + MI.getOperand(OpNo + 1).ChangeToImmediate(Offset); +} + +void XtensaRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, + int SPAdj, unsigned FIOperandNum, + RegScavenger *RS) const { + MachineInstr &MI = *II; + MachineFunction &MF = *MI.getParent()->getParent(); + + LLVM_DEBUG(errs() << "\nFunction : " << MF.getName() << "\n"; + errs() << "<--------->\n" + << MI); + + int FrameIndex = MI.getOperand(FIOperandNum).getIndex(); + uint64_t stackSize = MF.getFrameInfo().getStackSize(); + int64_t spOffset = MF.getFrameInfo().getObjectOffset(FrameIndex); + + LLVM_DEBUG(errs() << "FrameIndex : " << FrameIndex << "\n" + << "spOffset : " << spOffset << "\n" + << "stackSize : " << stackSize << "\n"); + + eliminateFI(MI, FIOperandNum, FrameIndex, stackSize, spOffset); +} + +Register XtensaRegisterInfo::getFrameRegister(const MachineFunction &MF) const { + const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); + return TFI->hasFP(MF) ? (Subtarget.isWinABI() ? Xtensa::A7 : Xtensa::A15) + : Xtensa::SP; +} diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.h b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.h new file mode 100644 index 0000000000000..43b73c035fe1c --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.h @@ -0,0 +1,63 @@ +//===-- XtensaRegisterInfo.h - Xtensa Register Information Impl --*- C++-*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===---------------------------------------------------------------------===// +// +// This file contains the Xtensa implementation of the TargetRegisterInfo class. 
+// +//===---------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_XTENSA_XTENSAREGISTERINFO_H +#define LLVM_LIB_TARGET_XTENSA_XTENSAREGISTERINFO_H + +#include "Xtensa.h" +#include "llvm/CodeGen/TargetRegisterInfo.h" + +#define GET_REGINFO_HEADER +#include "XtensaGenRegisterInfo.inc" + +namespace llvm { +class TargetRegisterClass; +class XtensaInstrInfo; +class XtensaSubtarget; + +struct XtensaRegisterInfo : public XtensaGenRegisterInfo { +public: + const XtensaSubtarget &Subtarget; + + XtensaRegisterInfo(const XtensaSubtarget &STI); + + bool requiresRegisterScavenging(const MachineFunction &MF) const override { + return true; + } + + bool requiresFrameIndexScavenging(const MachineFunction &MF) const override { + return true; + } + + bool trackLivenessAfterRegAlloc(const MachineFunction &) const override { + return true; + } + + const uint16_t * + getCalleeSavedRegs(const MachineFunction *MF = 0) const override; + const uint32_t *getCallPreservedMask(const MachineFunction &MF, + CallingConv::ID) const override; + BitVector getReservedRegs(const MachineFunction &MF) const override; + void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj, + unsigned FIOperandNum, + RegScavenger *RS) const override; + Register getFrameRegister(const MachineFunction &MF) const override; + +private: + void eliminateFI(MachineBasicBlock::iterator II, unsigned OpNo, + int FrameIndex, uint64_t StackSize, int64_t SPOffset) const; +}; + +} // end namespace llvm + +#endif /* LLVM_LIB_TARGET_XTENSA_REGISTERINFO_H */ diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td new file mode 100644 index 0000000000000..f45a41f929355 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td @@ -0,0 +1,219 @@ +//===- XtensaRegisterInfo.td - Xtensa Register defs -----------*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===-------------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Class definitions. 
+//===----------------------------------------------------------------------===//
+
+class XtensaReg<string n> : Register<n> {
+  let Namespace = "Xtensa";
+}
+
+class XtensaRegWithSubRegs<string n, list<Register> subregs>
+    : RegisterWithSubRegs<n, subregs> {
+  let Namespace = "Xtensa";
+}
+
+//===----------------------------------------------------------------------===//
+// General-purpose registers
+//===----------------------------------------------------------------------===//
+
+// Xtensa general purpose regs
+class ARReg<bits<4> num, string n, list<string> alt = []> : XtensaReg<n> {
+  let HWEncoding{3-0} = num;
+  let AltNames = alt;
+}
+
+// Return Address
+def A0 : ARReg<0, "a0">, DwarfRegNum<[0]>;
+
+// Stack Pointer (callee-saved)
+def SP : ARReg<1, "a1", ["sp"]>, DwarfRegNum<[1]>;
+
+// Function Arguments
+def A2 : ARReg<2, "a2">, DwarfRegNum<[2]>;
+def A3 : ARReg<3, "a3">, DwarfRegNum<[3]>;
+def A4 : ARReg<4, "a4">, DwarfRegNum<[4]>;
+def A5 : ARReg<5, "a5">, DwarfRegNum<[5]>;
+def A6 : ARReg<6, "a6">, DwarfRegNum<[6]>;
+def A7 : ARReg<7, "a7">, DwarfRegNum<[7]>;
+
+// Static Chain
+def A8 : ARReg<8, "a8">, DwarfRegNum<[8]>;
+
+def A9 : ARReg<9, "a9">, DwarfRegNum<[9]>;
+def A10 : ARReg<10, "a10">, DwarfRegNum<[10]>;
+def A11 : ARReg<11, "a11">, DwarfRegNum<[11]>;
+
+// Callee-saved
+def A12 : ARReg<12, "a12">, DwarfRegNum<[12]>;
+def A13 : ARReg<13, "a13">, DwarfRegNum<[13]>;
+def A14 : ARReg<14, "a14">, DwarfRegNum<[14]>;
+
+// Stack-Frame Pointer (optional) - Callee-Saved
+def A15 : ARReg<15, "a15">, DwarfRegNum<[15]>;
+
+// Register class with allocation order
+def AR : RegisterClass<"Xtensa", [i32], 32, (add
+  A8, A9, A10, A11, A12, A13, A14, A15,
+  A7, A6, A5, A4, A3, A2, A0, SP)>;
+
+//===----------------------------------------------------------------------===//
+// Special-purpose registers
+//===----------------------------------------------------------------------===//
+class SRReg<bits<8> num, string n, list<string> alt = []> : XtensaReg<n> {
+  let HWEncoding{7-0} = num;
+  let AltNames = alt;
+}
+
+// Shift Amount Register
+def SAR : SRReg<3, "sar", ["SAR", "3"]>;
+
+// Expected data value for S32C1I operation
+def SCOMPARE1 : SRReg<12, "scompare1", ["SCOMPARE1"]>;
+
+// Instruction breakpoint enable register
+def IBREAKENABLE : SRReg<96, "ibreakenable", ["IBREAKENABLE"]>;
+
+// Memory Control Register
+def MEMCTL : SRReg<97, "memctl", ["MEMCTL"]>;
+
+// Instruction break address register 0
+def IBREAKA0 : SRReg<128, "ibreaka0", ["IBREAKA0"]>;
+
+// Instruction break address register 1
+def IBREAKA1 : SRReg<129, "ibreaka1", ["IBREAKA1"]>;
+
+// Data break address register 0
+def DBREAKA0 : SRReg<144, "dbreaka0", ["DBREAKA0"]>;
+
+// Data break address register 1
+def DBREAKA1 : SRReg<145, "dbreaka1", ["DBREAKA1"]>;
+
+// Data breakpoint control register 0
+def DBREAKC0 : SRReg<160, "dbreakc0", ["DBREAKC0"]>;
+
+// Data breakpoint control register 1
+def DBREAKC1 : SRReg<161, "dbreakc1", ["DBREAKC1"]>;
+
+def CONFIGID0 : SRReg<176, "configid0", ["CONFIGID0"]>;
+
+// Exception PC1
+def EPC1 : SRReg<177, "epc1", ["EPC1"]>;
+
+// Exception PC2
+def EPC2 : SRReg<178, "epc2", ["EPC2"]>;
+
+// Exception PC3
+def EPC3 : SRReg<179, "epc3", ["EPC3"]>;
+
+// Exception PC4
+def EPC4 : SRReg<180, "epc4", ["EPC4"]>;
+
+// Exception PC5
+def EPC5 : SRReg<181, "epc5", ["EPC5"]>;
+
+// Exception PC6
+def EPC6 : SRReg<182, "epc6", ["EPC6"]>;
+
+// Exception PC7
+def EPC7 : SRReg<183, "epc7", ["EPC7"]>;
+
+def CONFIGID1 : SRReg<208, "configid1", ["CONFIGID1"]>;
+
+// Interrupt set register
+def INTSET : SRReg<226, "intset", ["INTSET"]>;
+
+// Interrupt enable mask register
+def INTENABLE : SRReg<228, "intenable", ["INTENABLE"]>;
+
+// Processor State
+def PS : SRReg<230, "ps", ["PS", "230"]>;
+
+// Vector base register
+def VECBASE : SRReg<231, "vecbase", ["VECBASE"]>;
+
+// Cause of last debug exception register
+def DEBUGCAUSE : SRReg<233, "debugcause", ["DEBUGCAUSE"]>;
+
+// Processor Clock Count Register
+def CCOUNT : SRReg<234, "ccount", ["CCOUNT"]>;
+
+// Processor ID Register
+def PRID : SRReg<235, "prid", ["PRID"]>;
+
+// Cycle number to interrupt register 0
+def CCOMPARE0 : SRReg<240, "ccompare0", ["CCOMPARE0"]>;
+
+// Cycle number to interrupt register 1
+def CCOMPARE1 : SRReg<241, "ccompare1", ["CCOMPARE1"]>;
+
+// Cycle number to interrupt register 2
+def CCOMPARE2 : SRReg<242, "ccompare2", ["CCOMPARE2"]>;
+
+def SR : RegisterClass<"Xtensa", [i32], 32, (add
+  SAR, SCOMPARE1, IBREAKENABLE, MEMCTL, IBREAKA0, IBREAKA1, DBREAKA0, DBREAKA1,
+  DBREAKC0, DBREAKC1, CONFIGID0, CONFIGID1, EPC1, EPC2, EPC3, EPC4, EPC5, EPC6,
+  EPC7, INTSET, INTENABLE, PS, VECBASE, DEBUGCAUSE, CCOUNT, PRID, CCOMPARE0,
+  CCOMPARE1, CCOMPARE2)>;
+
+//===----------------------------------------------------------------------===//
+// USER registers
+//===----------------------------------------------------------------------===//
+class URReg<bits<16> num, string n, list<string> alt = []> : XtensaReg<n> {
+  let HWEncoding{15-0} = num;
+  let AltNames = alt;
+}
+
+// Thread Pointer register
+def THREADPTR : URReg<231, "threadptr", ["THREADPTR"]>;
+
+def UR : RegisterClass<"Xtensa", [i32], 32, (add THREADPTR)>;
+
+//===----------------------------------------------------------------------===//
+// Floating-Point registers
+//===----------------------------------------------------------------------===//
+
+// Xtensa Floating-Point regs
+class FPReg<bits<4> num, string n> : XtensaReg<n> {
+  let HWEncoding{3-0} = num;
+}
+
+def F0 : FPReg<0, "f0">, DwarfRegNum<[19]>;
+def F1 : FPReg<1, "f1">, DwarfRegNum<[20]>;
+def F2 : FPReg<2, "f2">, DwarfRegNum<[21]>;
+def F3 : FPReg<3, "f3">, DwarfRegNum<[22]>;
+def F4 : FPReg<4, "f4">, DwarfRegNum<[23]>;
+def F5 : FPReg<5, "f5">, DwarfRegNum<[24]>;
+def F6 : FPReg<6, "f6">, DwarfRegNum<[25]>;
+def F7 : FPReg<7, "f7">, DwarfRegNum<[26]>;
+def F8 : FPReg<8, "f8">, DwarfRegNum<[27]>;
+def F9 : FPReg<9, "f9">, DwarfRegNum<[28]>;
+def F10 : FPReg<10, "f10">, DwarfRegNum<[29]>;
+def F11 : FPReg<11, "f11">, DwarfRegNum<[30]>;
+def F12 : FPReg<12, "f12">, DwarfRegNum<[31]>;
+def F13 : FPReg<13, "f13">, DwarfRegNum<[32]>;
+def F14 : FPReg<14, "f14">, DwarfRegNum<[33]>;
+def F15 : FPReg<15, "f15">, DwarfRegNum<[34]>;
+
+// Floating-Point register class with allocation order
+def FPR : RegisterClass<"Xtensa", [f32], 32, (add
+  F8, F9, F10, F11, F12, F13, F14, F15,
+  F7, F6, F5, F4, F3, F2, F1, F0)>;
+
+//===----------------------------------------------------------------------===//
+// Boolean registers
+//===----------------------------------------------------------------------===//
+class BReg<bits<4> num, string n> : XtensaReg<n> {
+  let HWEncoding{3-0} = num;
+}
+
+foreach i = 0-15 in {
+  def B#i : BReg<i, "b"#i>, DwarfRegNum<[i]>;
+}
+
+// Boolean register class
+def BR : RegisterClass<"Xtensa", [i1], 0, (add B0, B1, B2, B3, B4, B5, B6, B7,
+                                            B8, B9, B10, B11, B12, B13, B14, B15)>;
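The AR allocation order above lists the call-clobbered registers (a8-a11, then the argument registers) ahead of the callee-saved ones, with a0 and sp last since they carry the return address and stack pointer. As a minimal sketch (not part of the patch) of how the enums generated from this file are typically consumed on the C++ side, assuming the usual XtensaGenRegisterInfo.inc output pulled in through XtensaRegisterInfo.h; the helper name is hypothetical:

#include "XtensaRegisterInfo.h"
#include "llvm/ADT/BitVector.h"

using namespace llvm;

// Hypothetical helper: mark the registers the frame code treats as fixed,
// mirroring the choices made in getFrameRegister() earlier in this patch.
static BitVector reserveFixedXtensaRegs(const TargetRegisterInfo &TRI,
                                        bool HasFP, bool IsWinABI) {
  BitVector Reserved(TRI.getNumRegs());
  Reserved.set(Xtensa::SP); // a1, stack pointer
  Reserved.set(Xtensa::A0); // return address
  if (HasFP)
    Reserved.set(IsWinABI ? Xtensa::A7 : Xtensa::A15); // frame pointer
  return Reserved;
}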
diff --git a/llvm/lib/Target/Xtensa/XtensaSizeReductionPass.cpp b/llvm/lib/Target/Xtensa/XtensaSizeReductionPass.cpp
new file mode 100644
index 0000000000000..85aa4322c7108
--- /dev/null
+++ b/llvm/lib/Target/Xtensa/XtensaSizeReductionPass.cpp
@@ -0,0 +1,243 @@
+//===- XtensaSizeReductionPass.cpp - Xtensa Size Reduction
----------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "Xtensa.h" +#include "XtensaInstrInfo.h" +#include "XtensaSubtarget.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/CodeGen//MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Target/TargetMachine.h" + +using namespace llvm; + +#define DEBUG_TYPE "xtensa-size-reduce-pass" + +STATISTIC(NumReduced, "Number of 24-bit instructions reduced to 16-bit ones"); + +class XtensaSizeReduce : public MachineFunctionPass { +public: + static char ID; + XtensaSizeReduce() : MachineFunctionPass(ID) {} + + const XtensaSubtarget *Subtarget; + static const XtensaInstrInfo *XtensaII; + + bool runOnMachineFunction(MachineFunction &MF) override; + + llvm::StringRef getPassName() const override { + return "Xtensa instruction size reduction pass"; + } + +private: + /// Reduces width of instructions in the specified basic block. + bool ReduceMBB(MachineBasicBlock &MBB); + + /// Attempts to reduce MI, returns true on success. + bool ReduceMI(const MachineBasicBlock::instr_iterator &MII); +}; + +char XtensaSizeReduce::ID = 0; +const XtensaInstrInfo *XtensaSizeReduce::XtensaII; + +bool XtensaSizeReduce::ReduceMI(const MachineBasicBlock::instr_iterator &MII) { + MachineInstr *MI = &*MII; + MachineBasicBlock &MBB = *MI->getParent(); + unsigned Opcode = MI->getOpcode(); + + switch (Opcode) { + case Xtensa::L32I: { + MachineOperand Op0 = MI->getOperand(0); + MachineOperand Op1 = MI->getOperand(1); + MachineOperand Op2 = MI->getOperand(2); + + int64_t Imm = Op2.getImm(); + if (Imm >= 0 && Imm <= 60) { + // Replace L32I to L32I.N + DebugLoc dl = MI->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::L32I_N); + MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID); + MIB.add(Op0); + MIB.add(Op1); + MIB.add(Op2); + // Transfer MI flags. + MIB.setMIFlags(MI->getFlags()); + LLVM_DEBUG(dbgs() << " to 16-bit: " << *MIB); + NumReduced++; + MBB.erase_instr(MI); + return true; + } + } break; + + case Xtensa::S32I: { + MachineOperand Op0 = MI->getOperand(0); + MachineOperand Op1 = MI->getOperand(1); + MachineOperand Op2 = MI->getOperand(2); + + int64_t Imm = Op2.getImm(); + if (Imm >= 0 && Imm <= 60) { + // Replace S32I to S32I.N + DebugLoc dl = MI->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::S32I_N); + MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID); + MIB.add(Op0); + MIB.add(Op1); + MIB.add(Op2); + // Transfer MI flags. + MIB.setMIFlags(MI->getFlags()); + LLVM_DEBUG(dbgs() << " to 16-bit: " << *MIB); + NumReduced++; + MBB.erase_instr(MI); + return true; + } + + } break; + + case Xtensa::MOVI: { + MachineOperand Op0 = MI->getOperand(0); + MachineOperand Op1 = MI->getOperand(1); + + int64_t Imm = Op1.getImm(); + if (Imm >= -32 && Imm <= 95) { + // Replace MOVI to MOVI.N + DebugLoc dl = MI->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::MOVI_N); + MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID); + MIB.add(Op0); + MIB.add(Op1); + // Transfer MI flags. 
+ MIB.setMIFlags(MI->getFlags()); + LLVM_DEBUG(dbgs() << " to 16-bit: " << *MIB); + NumReduced++; + MBB.erase_instr(MI); + return true; + } + + } break; + + case Xtensa::ADD: { + MachineOperand Op0 = MI->getOperand(0); + MachineOperand Op1 = MI->getOperand(1); + MachineOperand Op2 = MI->getOperand(2); + + // Replace ADD to ADD.N + DebugLoc dl = MI->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::ADD_N); + MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID); + MIB.add(Op0); + MIB.add(Op1); + MIB.add(Op2); + // Transfer MI flags. + MIB.setMIFlags(MI->getFlags()); + LLVM_DEBUG(dbgs() << " to 16-bit: " << *MIB); + NumReduced++; + MBB.erase_instr(MI); + return true; + + } break; + + case Xtensa::ADDI: { + MachineOperand Op0 = MI->getOperand(0); + MachineOperand Op1 = MI->getOperand(1); + MachineOperand Op2 = MI->getOperand(2); + + int64_t Imm = Op2.getImm(); + if ((Imm >= 1 && Imm <= 15) || (Imm == -1)) { + // Replace ADDI to ADDI.N + DebugLoc dl = MI->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::ADDI_N); + MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID); + MIB.add(Op0); + MIB.add(Op1); + MIB.add(Op2); + // Transfer MI flags. + MIB.setMIFlags(MI->getFlags()); + LLVM_DEBUG(dbgs() << " to 16-bit: " << *MIB); + NumReduced++; + MBB.erase_instr(MI); + return true; + } + } break; + + case Xtensa::RET: { + // Replace RET to RET.N + DebugLoc dl = MI->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::RET_N); + MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID); + // Transfer MI flags. + MIB.setMIFlags(MI->getFlags()); + LLVM_DEBUG(dbgs() << " to 16-bit: " << *MIB); + NumReduced++; + MBB.erase_instr(MI); + return true; + } break; + + case Xtensa::RETW: { + // Replace RETW to RETW.N + DebugLoc dl = MI->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::RETW_N); + MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID); + // Transfer MI flags. 
+    MIB.setMIFlags(MI->getFlags());
+    LLVM_DEBUG(dbgs() << " to 16-bit: " << *MIB);
+    NumReduced++;
+    MBB.erase_instr(MI);
+    return true;
+  } break;
+
+  default:
+    break;
+  }
+
+  return false;
+}
+
+bool XtensaSizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
+  bool Modified = false;
+  MachineBasicBlock::instr_iterator MII = MBB.instr_begin(),
+                                    E = MBB.instr_end();
+  MachineBasicBlock::instr_iterator NextMII;
+
+  // Iterate through the instructions in the basic block.
+  for (; MII != E; MII = NextMII) {
+    NextMII = std::next(MII);
+    MachineInstr *MI = &*MII;
+
+    // Don't reduce bundled instructions or pseudo operations.
+    if (MI->isBundle() || MI->isTransient())
+      continue;
+
+    // Try to reduce a 24-bit instruction to a 16-bit one.
+    Modified |= ReduceMI(MII);
+  }
+
+  return Modified;
+}
+
+bool XtensaSizeReduce::runOnMachineFunction(MachineFunction &MF) {
+
+  Subtarget = &static_cast<const XtensaSubtarget &>(MF.getSubtarget());
+  XtensaII = static_cast<const XtensaInstrInfo *>(Subtarget->getInstrInfo());
+  bool Modified = false;
+
+  if (!Subtarget->hasDensity())
+    return Modified;
+
+  MachineFunction::iterator I = MF.begin(), E = MF.end();
+
+  for (; I != E; ++I)
+    Modified |= ReduceMBB(*I);
+  return Modified;
+}
+
+FunctionPass *llvm::createXtensaSizeReductionPass() {
+  return new XtensaSizeReduce();
+}
diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp
new file mode 100644
index 0000000000000..333fcb9f68f8a
--- /dev/null
+++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp
@@ -0,0 +1,58 @@
+//===- XtensaSubtarget.cpp - Xtensa Subtarget Information -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Xtensa specific subclass of TargetSubtargetInfo.
+//
+//===----------------------------------------------------------------------===//
+
+#include "XtensaSubtarget.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "xtensa-subtarget"
+
+#define GET_SUBTARGETINFO_TARGET_DESC
+#define GET_SUBTARGETINFO_CTOR
+#include "XtensaGenSubtargetInfo.inc"
+
+using namespace llvm;
+
+XtensaSubtarget &
+XtensaSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) {
+  std::string CPUName = CPU;
+  if (CPUName.empty()) {
+    // Set the default CPU name.
+    CPUName = "esp32";
+  }
+
+  HasDensity = false;
+  HasSingleFloat = false;
+  HasLoop = false;
+  HasMAC16 = false;
+  HasWindowed = false;
+  HasBoolean = false;
+  HasSEXT = false;
+  HasNSA = false;
+  HasMul32 = false;
+  HasMul32High = false;
+  HasDiv32 = false;
+  HasS32C1I = false;
+  HasTHREADPTR = false;
+
+  // Parse features string.
+ ParseSubtargetFeatures(CPUName, FS); + return *this; +} + +XtensaSubtarget::XtensaSubtarget(const Triple &TT, const std::string &CPU, + const std::string &FS, const TargetMachine &TM) + : XtensaGenSubtargetInfo(TT, CPU, FS), TargetTriple(TT), + InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this), + TSInfo(), FrameLowering(), UseSmallSection(false) {} diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.h b/llvm/lib/Target/Xtensa/XtensaSubtarget.h new file mode 100644 index 0000000000000..db56a4a25ee1f --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.h @@ -0,0 +1,130 @@ +//===-- XtensaSubtarget.h - Define Subtarget for the Xtensa ----*- C++ -*--===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares the Xtensa specific subclass of TargetSubtargetInfo. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_XTENSA_XTENSASUBTARGET_H +#define LLVM_LIB_TARGET_XTENSA_XTENSASUBTARGET_H + +#include "XtensaFrameLowering.h" +#include "XtensaISelLowering.h" +#include "XtensaInstrInfo.h" +#include "XtensaRegisterInfo.h" +#include "llvm/CodeGen/SelectionDAGTargetInfo.h" +#include "llvm/CodeGen/TargetSubtargetInfo.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/Target/TargetMachine.h" + +#define GET_SUBTARGETINFO_HEADER +#include "XtensaGenSubtargetInfo.inc" + +namespace llvm { +class StringRef; + +class XtensaSubtarget : public XtensaGenSubtargetInfo { +private: + Triple TargetTriple; + XtensaInstrInfo InstrInfo; + XtensaTargetLowering TLInfo; + SelectionDAGTargetInfo TSInfo; + XtensaFrameLowering FrameLowering; + bool UseSmallSection; + + // Enabled Xtensa Density extension + bool HasDensity; + + // Enabled Xtensa Single FP instructions + bool HasSingleFloat; + + // Enabled Xtensa Loop extension + bool HasLoop; + + // Enabled Xtensa MAC16 instructions + bool HasMAC16; + + // Enabled Xtensa Windowed Register option + bool HasWindowed; + + // Enabled Xtensa Boolean extension + bool HasBoolean; + + // Enable Xtensa Sign Extend option + bool HasSEXT; + + // Enable Xtensa NSA option + bool HasNSA; + + // Enable Xtensa Mul32 option + bool HasMul32; + + // Enable Xtensa Mul32High option + bool HasMul32High; + + // Enable Xtensa Div32 option + bool HasDiv32; + + // Enable Xtensa S32C1I option + bool HasS32C1I; + + // Enable Xtensa THREADPTR option + bool HasTHREADPTR; + + XtensaSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS); + +public: + XtensaSubtarget(const Triple &TT, const std::string &CPU, + const std::string &FS, const TargetMachine &TM); + + const TargetFrameLowering *getFrameLowering() const { return &FrameLowering; } + const XtensaInstrInfo *getInstrInfo() const { return &InstrInfo; } + const XtensaRegisterInfo *getRegisterInfo() const { + return &InstrInfo.getRegisterInfo(); + } + + const XtensaTargetLowering *getTargetLowering() const { return &TLInfo; } + const SelectionDAGTargetInfo *getSelectionDAGInfo() const { return &TSInfo; } + + bool isWinABI() const { return hasWindowed(); }; + + bool hasDensity() const { return HasDensity; }; + + bool hasSingleFloat() const { return HasSingleFloat; }; + + bool hasWindowed() const { return HasWindowed; }; + + bool hasLoop() const { return HasLoop; }; + + bool hasMAC16() const { return HasMAC16; }; + + bool hasBoolean() 
const { return HasBoolean; };
+
+  bool hasSEXT() const { return HasSEXT; };
+
+  bool hasNSA() const { return HasNSA; };
+
+  bool hasMul32() const { return HasMul32; };
+
+  bool hasMul32High() const { return HasMul32High; };
+
+  bool hasDiv32() const { return HasDiv32; };
+
+  bool hasS32C1I() const { return HasS32C1I; };
+
+  bool hasTHREADPTR() const { return HasTHREADPTR; };
+
+  bool useSmallSection() const { return UseSmallSection; }
+
+  // Automatically generated by tblgen.
+  void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
+};
+} // end namespace llvm
+
+#endif /* LLVM_LIB_TARGET_XTENSA_XTENSASUBTARGET_H */
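For reference, a small sketch (not part of the patch) of how these subtarget accessors are consumed by the code shown elsewhere in this diff: the size-reduction pass keys off hasDensity(), and the frame-register choice in XtensaRegisterInfo keys off isWinABI(). The helper names here are hypothetical.

#include "XtensaSubtarget.h"
#include "llvm/CodeGen/MachineFunction.h"

using namespace llvm;

static bool canUseDensityInstrs(const MachineFunction &MF) {
  // 16-bit *.N encodings are only legal when the Density option is present.
  return MF.getSubtarget<XtensaSubtarget>().hasDensity();
}

static unsigned pickFramePointer(const MachineFunction &MF) {
  // The windowed ABI uses a7 as the frame pointer, call0 uses a15,
  // matching getFrameRegister() in XtensaRegisterInfo.cpp.
  return MF.getSubtarget<XtensaSubtarget>().isWinABI() ? Xtensa::A7
                                                       : Xtensa::A15;
}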
diff --git a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp
new file mode 100644
index 0000000000000..19438981ca77d
--- /dev/null
+++ b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp
@@ -0,0 +1,104 @@
+//===- XtensaTargetMachine.cpp - Define TargetMachine for Xtensa ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements the info about Xtensa target spec.
+//
+//===----------------------------------------------------------------------===//
+
+#include "XtensaTargetMachine.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Transforms/IPO/PassManagerBuilder.h"
+#include "llvm/Transforms/Scalar.h"
+
+using namespace llvm;
+
+extern "C" void LLVMInitializeXtensaTarget() {
+  // Register the target.
+  RegisterTargetMachine<XtensaTargetMachine> A(TheXtensaTarget);
+}
+
+static std::string computeDataLayout(const Triple &TT, StringRef CPU,
+                                     const TargetOptions &Options,
+                                     bool isLittle) {
+  std::string Ret = "e-m:e-p:32:32-i8:8:32-i16:16:32-i64:64-n32";
+
+  return Ret;
+}
+
+static Reloc::Model getEffectiveRelocModel(bool JIT,
+                                           Optional<Reloc::Model> RM) {
+  if (!RM.hasValue() || JIT)
+    return Reloc::Static;
+  return *RM;
+}
+
+XtensaTargetMachine::XtensaTargetMachine(const Target &T, const Triple &TT,
+                                         StringRef CPU, StringRef FS,
+                                         const TargetOptions &Options,
+                                         Optional<Reloc::Model> RM,
+                                         Optional<CodeModel::Model> CM,
+                                         CodeGenOpt::Level OL, bool JIT,
+                                         bool isLittle)
+    : LLVMTargetMachine(T, computeDataLayout(TT, CPU, Options, isLittle), TT,
+                        CPU, FS, Options, getEffectiveRelocModel(JIT, RM),
+                        getEffectiveCodeModel(CM, CodeModel::Small), OL),
+      TLOF(std::make_unique<TargetLoweringObjectFileELF>()),
+      Subtarget(TT, CPU, FS, *this) {
+  initAsmInfo();
+}
+
+XtensaTargetMachine::XtensaTargetMachine(const Target &T, const Triple &TT,
+                                         StringRef CPU, StringRef FS,
+                                         const TargetOptions &Options,
+                                         Optional<Reloc::Model> RM,
+                                         Optional<CodeModel::Model> CM,
+                                         CodeGenOpt::Level OL, bool JIT)
+    : XtensaTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, true) {}
+
+const XtensaSubtarget *
+XtensaTargetMachine::getSubtargetImpl(const Function &F) const {
+  return &Subtarget;
+}
+
+namespace {
+/// Xtensa Code Generator Pass Configuration Options.
+class XtensaPassConfig : public TargetPassConfig {
+public:
+  XtensaPassConfig(XtensaTargetMachine &TM, PassManagerBase &PM)
+      : TargetPassConfig(TM, PM) {}
+
+  XtensaTargetMachine &getXtensaTargetMachine() const {
+    return getTM<XtensaTargetMachine>();
+  }
+
+  void addIRPasses() override;
+  bool addInstSelector() override;
+  void addPreEmitPass() override;
+};
+} // end anonymous namespace
+
+bool XtensaPassConfig::addInstSelector() {
+  addPass(createXtensaISelDag(getXtensaTargetMachine(), getOptLevel()));
+  return false;
+}
+
+void XtensaPassConfig::addIRPasses() { addPass(createAtomicExpandPass()); }
+
+void XtensaPassConfig::addPreEmitPass() {
+  addPass(createXtensaSizeReductionPass());
+  addPass(&BranchRelaxationPassID);
+}
+
+TargetPassConfig *XtensaTargetMachine::createPassConfig(PassManagerBase &PM) {
+  return new XtensaPassConfig(*this, PM);
+}
diff --git a/llvm/lib/Target/Xtensa/XtensaTargetMachine.h b/llvm/lib/Target/Xtensa/XtensaTargetMachine.h
new file mode 100644
index 0000000000000..27d0e9a2ef149
--- /dev/null
+++ b/llvm/lib/Target/Xtensa/XtensaTargetMachine.h
@@ -0,0 +1,54 @@
+//===-- XtensaTargetMachine.h - Define TargetMachine for Xtensa -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the Xtensa specific subclass of TargetMachine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_XTENSA_XTENSATARGETMACHINE_H
+#define LLVM_LIB_TARGET_XTENSA_XTENSATARGETMACHINE_H
+
+#include "XtensaSubtarget.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+
+class TargetFrameLowering;
+
+extern Target TheXtensaTarget;
+
+class XtensaTargetMachine : public LLVMTargetMachine {
+  std::unique_ptr<TargetLoweringObjectFile> TLOF;
+
+public:
+  XtensaTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+                      StringRef FS, const TargetOptions &Options,
+                      Optional<Reloc::Model> RM, Optional<CodeModel::Model> CM,
+                      CodeGenOpt::Level OL, bool JIT, bool isLittle);
+
+  XtensaTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+                      StringRef FS, const TargetOptions &Options,
+                      Optional<Reloc::Model> RM, Optional<CodeModel::Model> CM,
+                      CodeGenOpt::Level OL, bool JIT);
+
+  // Override TargetMachine.
+  const XtensaSubtarget *getSubtargetImpl() const { return &Subtarget; }
+  const XtensaSubtarget *getSubtargetImpl(const Function &F) const override;
+
+  // Override LLVMTargetMachine.
+  TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
+
+  TargetLoweringObjectFile *getObjFileLowering() const override {
+    return TLOF.get();
+  }
+
+protected:
+  XtensaSubtarget Subtarget;
+};
+} // end namespace llvm
+
+#endif /* LLVM_LIB_TARGET_XTENSA_XTENSATARGETMACHINE_H */
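A hedged driver-side sketch of how this target machine becomes reachable once LLVMInitializeXtensaTarget() has run. The "xtensa" triple string and the "esp32" CPU name are assumptions here; the registration of the Target object itself lives in MCTargetDesc code not shown in this hunk.

#include "llvm/ADT/Optional.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <memory>
#include <string>

using namespace llvm;

// Hypothetical helper: look the backend up through the registry and build a
// TargetMachine, the same way llc or clang would.
static std::unique_ptr<TargetMachine> makeXtensaTargetMachine() {
  std::string Err;
  const Target *T = TargetRegistry::lookupTarget("xtensa", Err);
  if (!T)
    return nullptr; // backend not linked in
  return std::unique_ptr<TargetMachine>(
      T->createTargetMachine("xtensa", "esp32", "", TargetOptions(),
                             /*RM=*/None));
}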
diff --git a/llvm/lib/Target/Xtensa/XtensaTargetObjectFile.cpp b/llvm/lib/Target/Xtensa/XtensaTargetObjectFile.cpp
new file mode 100644
index 0000000000000..9c882aad39943
--- /dev/null
+++ b/llvm/lib/Target/Xtensa/XtensaTargetObjectFile.cpp
@@ -0,0 +1,148 @@
+//===- XtensaTargetObjectFile.cpp - Xtensa Object Files -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "XtensaTargetObjectFile.h"
+#include "XtensaSubtarget.h"
+#include "XtensaTargetMachine.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCSectionELF.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Target/TargetMachine.h"
+
+using namespace llvm;
+
+static cl::opt<unsigned> SSThreshold(
+    "xtensa-ssection-threshold", cl::Hidden,
+    cl::desc("Small data and bss section threshold size (default=8)"),
+    cl::init(8));
+
+static cl::opt<bool>
+    LocalSData("xmlocal-sdata", cl::Hidden,
+               cl::desc("Xtensa: Use gp_rel for object-local data."),
+               cl::init(true));
+
+static cl::opt<bool> ExternSData(
+    "xmextern-sdata", cl::Hidden,
+    cl::desc("Xtensa: Use gp_rel for data that is not defined by the "
+             "current object."),
+    cl::init(true));
+
+void XtensaTargetObjectFile::Initialize(MCContext &Ctx,
+                                        const TargetMachine &TM) {
+  TargetLoweringObjectFileELF::Initialize(Ctx, TM);
+  InitializeELF(TM.Options.UseInitArray);
+
+  LiteralSection = getContext().getELFSection(
+      ".literal", ELF::SHT_PROGBITS, ELF::SHF_EXECINSTR | ELF::SHF_ALLOC);
+  LiteralSection->setAlignment(Align(4));
+
+  SmallDataSection = getContext().getELFSection(
+      ".sdata", ELF::SHT_PROGBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC);
+
+  SmallBSSSection = getContext().getELFSection(".sbss", ELF::SHT_NOBITS,
+                                               ELF::SHF_WRITE | ELF::SHF_ALLOC);
+
+  this->TM = &static_cast<const XtensaTargetMachine &>(TM);
+}
+
+// An address must be loaded from a small section if its size is less than the
+// small section size threshold. Data in this section must be addressed using
+// the gp_rel operator.
+static bool IsInSmallSection(uint64_t Size) {
+  // gcc has traditionally not treated zero-sized objects as small data, so
+  // this is effectively part of the ABI.
+  return Size > 0 && Size <= SSThreshold;
+}
+
+/// Return true if this global address should be placed into small data/bss
+/// section.
+bool XtensaTargetObjectFile::IsGlobalInSmallSection(
+    const GlobalObject *GO, const TargetMachine &TM) const {
+  // We first check the case where global is a declaration, because finding
+  // section kind using getKindForGlobal() is only allowed for global
+  // definitions.
+  if (GO->isDeclaration() || GO->hasAvailableExternallyLinkage())
+    return IsGlobalInSmallSectionImpl(GO, TM);
+
+  return IsGlobalInSmallSection(GO, TM, getKindForGlobal(GO, TM));
+}
+
+/// Return true if this global address should be placed into small data/bss
+/// section.
+bool XtensaTargetObjectFile::IsGlobalInSmallSection(const GlobalObject *GO,
+                                                    const TargetMachine &TM,
+                                                    SectionKind Kind) const {
+  return (IsGlobalInSmallSectionImpl(GO, TM) &&
+          (Kind.isData() || Kind.isBSS() || Kind.isCommon()));
+}
+
+/// Return true if this global address should be placed into small data/bss
+/// section. This method does all the work, except for checking the section
+/// kind.
+bool XtensaTargetObjectFile::IsGlobalInSmallSectionImpl(
+    const GlobalObject *GO, const TargetMachine &TM) const {
+  const XtensaSubtarget &Subtarget =
+      *static_cast<const XtensaTargetMachine &>(TM).getSubtargetImpl();
+
+  // Return if small section is not available.
+  if (!Subtarget.useSmallSection())
+    return false;
+
+  // Only global variables, not functions.
+  const GlobalVariable *GVA = dyn_cast<GlobalVariable>(GO);
+  if (!GVA)
+    return false;
+
+  // Enforce -mlocal-sdata.
+  if (!LocalSData && GO->hasLocalLinkage())
+    return false;
+
+  // Enforce -mextern-sdata.
+  if (!ExternSData && ((GO->hasExternalLinkage() && GO->isDeclaration()) ||
+                       GO->hasCommonLinkage()))
+    return false;
+
+  Type *Ty = GO->getType()->getElementType();
+  return IsInSmallSection(
+      GO->getParent()->getDataLayout().getTypeAllocSize(Ty));
+}
+
+MCSection *XtensaTargetObjectFile::SelectSectionForGlobal(
+    const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const {
+  // Handle Small Section classification here.
+  if (Kind.isBSS() && IsGlobalInSmallSection(GO, TM, Kind))
+    return SmallBSSSection;
+  if (Kind.isData() && IsGlobalInSmallSection(GO, TM, Kind))
+    return SmallDataSection;
+  if (Kind.isReadOnly() && IsGlobalInSmallSection(GO, TM, Kind))
+    return SmallDataSection;
+
+  // Otherwise, we work the same as ELF.
+  return TargetLoweringObjectFileELF::SelectSectionForGlobal(GO, Kind, TM);
+}
+
+/// Return true if this constant should be placed into small data section.
+bool XtensaTargetObjectFile::IsConstantInSmallSection(
+    const DataLayout &DL, const Constant *CN, const TargetMachine &TM) const {
+  return (static_cast<const XtensaTargetMachine &>(TM)
+              .getSubtargetImpl()
+              ->useSmallSection() &&
+          LocalSData && IsInSmallSection(DL.getTypeAllocSize(CN->getType())));
+}
+
+/// Return the section for a constant.
+MCSection *XtensaTargetObjectFile::getSectionForConstant(
+    const DataLayout &DL, SectionKind Kind, const Constant *C,
+    unsigned &Align) const {
+  return LiteralSection;
+}
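To make the classification above concrete, here is how a few C++ globals would be bucketed under the default 8-byte threshold, assuming a subtarget with UseSmallSection enabled (illustrative only, not part of the patch):

// Compiled for Xtensa with small sections enabled:
int SmallInit = 1;      // 4 bytes, initialized data    -> .sdata
int SmallZero;          // 4 bytes, zero-initialized    -> .sbss
char Buffer[64];        // 64 bytes, over the threshold -> regular .bss
const float Pi = 3.14f; // 4 bytes, read-only           -> .sdata (read-only path above)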
diff --git a/llvm/lib/Target/Xtensa/XtensaTargetObjectFile.h b/llvm/lib/Target/Xtensa/XtensaTargetObjectFile.h
new file mode 100644
index 0000000000000..4e98bc799318c
--- /dev/null
+++ b/llvm/lib/Target/Xtensa/XtensaTargetObjectFile.h
@@ -0,0 +1,48 @@
+//===-- XtensaTargetObjectFile.h - Xtensa Object Info ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_XTENSA_XTENSATARGETOBJECTFILE_H
+#define LLVM_TARGET_XTENSA_XTENSATARGETOBJECTFILE_H
+
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+
+namespace llvm {
+class XtensaTargetMachine;
+
+class XtensaTargetObjectFile : public TargetLoweringObjectFileELF {
+  MCSection *LiteralSection;
+  MCSection *SmallDataSection;
+  MCSection *SmallBSSSection;
+  const XtensaTargetMachine *TM;
+
+public:
+  void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
+
+  /// Return true if this global address should be placed into small data/bss
+  /// section.
+  bool IsGlobalInSmallSection(const GlobalObject *GO, const TargetMachine &TM,
+                              SectionKind Kind) const;
+  bool IsGlobalInSmallSection(const GlobalObject *GO,
+                              const TargetMachine &TM) const;
+  bool IsGlobalInSmallSectionImpl(const GlobalObject *GO,
+                                  const TargetMachine &TM) const;
+
+  MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind,
+                                    const TargetMachine &TM) const override;
+
+  /// Return true if this constant should be placed into small data section.
+  bool IsConstantInSmallSection(const DataLayout &DL, const Constant *CN,
+                                const TargetMachine &TM) const;
+
+  MCSection *getSectionForConstant(const DataLayout &DL, SectionKind Kind,
+                                   const Constant *C,
+                                   unsigned &Align) const override;
+};
+} // end namespace llvm
+
+#endif /* LLVM_TARGET_XTENSA_XTENSATARGETOBJECTFILE_H */
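Note that the XtensaTargetMachine constructor in this diff instantiates the generic TargetLoweringObjectFileELF rather than this class. If one wanted the .literal / .sdata / .sbss behaviour above to take effect, the wiring would be a small swap along these lines; this is a sketch only, not something the patch itself does.

#include "XtensaTargetObjectFile.h"
#include <memory>

// Hypothetical replacement for the TLOF member initializer in
// XtensaTargetMachine's constructor.
static std::unique_ptr<llvm::TargetLoweringObjectFile> makeXtensaTLOF() {
  return std::make_unique<llvm::XtensaTargetObjectFile>();
}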